llama_ros: llama.cpp for ROS 2
Loading...
Searching...
No Matches
llava_ros::Llava Class Reference

#include <llava.hpp>

Inheritance diagram for llava_ros::Llava:
Collaboration diagram for llava_ros::Llava:

Public Member Functions

 Llava (const struct common_params &params, const struct LlavaParams &llava_params, std::string system_prompt="")
 
 ~Llava ()
 
void reset () override
 
bool load_image (std::string base64_str)
 
struct llava_image_embed * base64_image_to_embed (const std::string &base64_str)
 
- Public Member Functions inherited from llama_ros::Llama
 Llama (const struct common_params &params, std::string system_prompt="", bool initial_reset=true)
 
virtual ~Llama ()
 
std::vector< llama_token > tokenize (const std::string &text, bool add_bos, bool special=false)
 
std::string detokenize (const std::vector< llama_token > &tokens)
 
void cancel ()
 
std::string format_chat_prompt (std::vector< struct common_chat_msg > chat_msgs, bool add_ass)
 
std::vector< struct LoRA > list_loras ()
 
void update_loras (std::vector< struct LoRA > loras)
 
std::vector< llama_token > truncate_tokens (const std::vector< llama_token > &tokens, int limit_size, bool add_eos=true)
 
struct EmbeddingsOuput generate_embeddings (const std::string &input_prompt, int normalization=2)
 
struct EmbeddingsOuput generate_embeddings (const std::vector< llama_token > &tokens, int normalization=2)
 
float rank_document (const std::string &query, const std::string &document)
 
std::vector< float > rank_documents (const std::string &query, const std::vector< std::string > &documents)
 
struct ResponseOutput generate_response (const std::string &input_prompt, struct common_params_sampling sparams, GenerateResponseCallback callbakc=nullptr, std::vector< std::string > stop={})
 
struct ResponseOutput generate_response (const std::string &input_prompt, GenerateResponseCallback callbakc=nullptr, std::vector< std::string > stop={})
 
const struct llama_context * get_ctx ()
 
const struct llama_model * get_model ()
 
const struct llama_vocab * get_vocab ()
 
int get_n_ctx ()
 
int get_n_ctx_train ()
 
int get_n_embd ()
 
int get_n_vocab ()
 
std::string get_metadata (const std::string &key, size_t size)
 
std::string get_metadata (const std::string &model_name, const std::string &key, size_t size)
 
int get_int_metadata (const std::string &key, size_t size)
 
int get_int_metadata (const std::string &model_name, const std::string &key, size_t size)
 
float get_float_metadata (const std::string &key, size_t size)
 
float get_float_metadata (const std::string &model_name, const std::string &key, size_t size)
 
struct Metadata get_metadata ()
 
bool is_embedding ()
 
bool is_reranking ()
 
bool add_bos_token ()
 
bool is_eog ()
 
llama_token get_token_eos ()
 
llama_token get_token_bos ()
 
llama_token get_token_sep ()
 

Protected Member Functions

void load_prompt (const std::string &input_prompt, bool add_pfx, bool add_sfx) override
 
bool eval_image (struct llava_image_embed *image_embed)
 
bool eval_prompt () override
 
bool eval (struct llama_batch batch) override
 
- Protected Member Functions inherited from llama_ros::Llama
StopType find_stop (std::vector< struct CompletionOutput > completion_result_list, std::vector< std::string > stopping_words)
 
StopType find_stop_word (std::vector< struct CompletionOutput > completion_result_list, std::string stopping_word)
 
bool eval_system_prompt ()
 
bool eval_prompt (std::vector< llama_token > prompt_tokens)
 
bool eval_token (llama_token token)
 
bool eval (std::vector< llama_token > tokens)
 
std::vector< struct TokenProb > get_probs ()
 
struct CompletionOutput sample ()
 

Protected Attributes

struct llava_image_embed * image_embed
 
struct clip_ctx * ctx_clip
 
struct LlavaParams llava_params
 
- Protected Attributes inherited from llama_ros::Llama
struct common_params params
 
struct common_init_result llama_init
 
struct llama_context * ctx
 
struct llama_model * model
 
std::vector< common_adapter_lora_info > lora_adapters
 
struct common_sampler * sampler
 
struct ggml_threadpool * threadpool
 
struct ggml_threadpool * threadpool_batch
 
std::string system_prompt
 
bool canceled
 
llama_utils::Spinner spinner
 
std::vector< llama_token > prompt_tokens
 
int32_t n_past
 
int32_t n_consumed
 
int32_t ga_i
 

Private Member Functions

void free_image ()
 

Private Attributes

int image_pose
 
int st_pos_id
 

Constructor & Destructor Documentation

◆ Llava()

Llava::Llava ( const struct common_params & params,
const struct LlavaParams & llava_params,
std::string system_prompt = "" )

◆ ~Llava()

Llava::~Llava ( )

Member Function Documentation

◆ base64_image_to_embed()

struct llava_image_embed * Llava::base64_image_to_embed ( const std::string & base64_str)

◆ eval()

bool Llava::eval ( struct llama_batch batch)
override protected virtual

Reimplemented from llama_ros::Llama.

◆ eval_image()

bool Llava::eval_image ( struct llava_image_embed * image_embed)
protected

◆ eval_prompt()

bool Llava::eval_prompt ( )
override protected virtual

Reimplemented from llama_ros::Llama.

◆ free_image()

void Llava::free_image ( )
private

◆ load_image()

bool Llava::load_image ( std::string base64_str)

◆ load_prompt()

void Llava::load_prompt ( const std::string & input_prompt,
bool add_pfx,
bool add_sfx )
override protected virtual

Reimplemented from llama_ros::Llama.

◆ reset()

void Llava::reset ( )
override virtual

Reimplemented from llama_ros::Llama.

Member Data Documentation

◆ ctx_clip

struct clip_ctx* llava_ros::Llava::ctx_clip
protected

◆ image_embed

struct llava_image_embed* llava_ros::Llava::image_embed
protected

◆ image_pose

int llava_ros::Llava::image_pose
private

◆ llava_params

struct LlavaParams llava_ros::Llava::llava_params
protected

◆ st_pos_id

int llava_ros::Llava::st_pos_id
private

The documentation for this class was generated from the following files: