llama_ros: llama.cpp for ROS 2
llama_ros.langchain.chat_llama_ros.ChatLlamaROS Class Reference
ChatLlamaROS derives from llama_ros.langchain.llama_ros_common.LlamaROSCommon (inheritance and collaboration diagrams omitted; inherited members are listed below).
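
A minimal usage sketch (not part of the generated reference): it assumes a llama_ros node with a chat-capable GGUF model is already running, e.g. started through the llama_bringup launch files, and that rclpy is available:

import rclpy
from langchain_core.messages import HumanMessage, SystemMessage
from llama_ros.langchain import ChatLlamaROS

rclpy.init()

# ChatLlamaROS talks to the running llama_ros node through LlamaClientNode.
chat = ChatLlamaROS(temp=0.2)

response = chat.invoke(
    [
        SystemMessage(content="You are a concise assistant."),
        HumanMessage(content="Name one ROS 2 distribution."),
    ]
)
print(response.content)

rclpy.shutdown()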

Public Member Functions

Runnable[LanguageModelInput, BaseMessage] bind_tools (self, Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]] tools, *, Optional[Union[dict, str, Literal["auto", "all", "one", "any"], bool]] tool_choice="auto", Literal["function_calling", "json_schema", "json_mode"] method="function_calling", **Any kwargs)
 
Runnable[LanguageModelInput, Union[Dict, BaseModel]] with_structured_output (self, Optional[Union[Dict, Type[BaseModel], Type]] schema=None, *, bool include_raw=False, Literal["function_calling", "json_schema", "json_mode"] method="function_calling", **Any kwargs)
 
- Public Member Functions inherited from llama_ros.langchain.llama_ros_common.LlamaROSCommon
Dict validate_environment (cls, Dict values)
 
None cancel (self)
 

Static Public Attributes

bool use_default_template = False
 
bool use_gguf_template = True
 
ImmutableSandboxedEnvironment jinja_env
 
- Static Public Attributes inherited from llama_ros.langchain.llama_ros_common.LlamaROSCommon
LlamaClientNode llama_client = None
 
CvBridge cv_bridge = CvBridge()
 
Metadata model_metadata = None
 
int n_prev = 64
 
int n_probs = 1
 
int min_keep = 0
 
bool ignore_eos = False
 
dict logit_bias = {}
 
float temp = 0.80
 
float dynatemp_range = 0.0
 
float dynatemp_exponent = 1.0
 
int top_k = 40
 
float top_p = 0.95
 
float min_p = 0.05
 
float xtc_probability = 0.0
 
float xtc_threshold = 0.1
 
float typical_p = 1.00
 
int penalty_last_n = 64
 
float penalty_repeat = 1.00
 
float penalty_freq = 0.00
 
float penalty_present = 0.00
 
float dry_multiplier = 0.0
 
float dry_base = 1.75
 
int dry_allowed_length = 2
 
int dry_penalty_last_n = -1
 
list dry_sequence_breakers = ["\\n", ":", '\\"', "*"]
 
int mirostat = 0
 
float mirostat_eta = 0.10
 
float mirostat_tau = 5.0
 
str samplers_sequence = "edkypmxt"
 
str grammar = ""
 
str grammar_schema = ""
 
list penalty_prompt_tokens = []
 
bool use_penalty_prompt_tokens = False
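
The attributes above mirror llama.cpp sampling parameters and appear to be pydantic fields (note the validate_environment validator inherited from LlamaROSCommon), so they can be overridden at construction time. An illustrative sketch (values are arbitrary, not recommendations):

from llama_ros.langchain import ChatLlamaROS

chat = ChatLlamaROS(
    temp=0.7,            # sampling temperature
    top_k=40,            # consider only the 40 most likely tokens
    top_p=0.9,           # nucleus sampling threshold
    penalty_repeat=1.1,  # repetition penalty over the last penalty_last_n tokens
    penalty_last_n=64,   # window for repetition penalties
)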
 

Protected Member Functions

Dict[str, Any] _default_params (self)
 
str _llm_type (self)
 
str _generate_prompt (self, List[dict[str, str]] messages, **kwargs)
 
List[Dict[str, str]] _convert_content (self, Union[Dict[str, str], str, List[str], List[Dict[str, str]]] content)
 
list[dict[str, str]] _convert_message_to_dict (self, BaseMessage message)
 
Tuple[Dict[str, str], Optional[str], Optional[str]] _extract_data_from_messages (self, List[BaseMessage] messages)
 
List[BaseMessage] _create_chat_generations (self, GenerateResponse.Result response, str method)
 
str _generate (self, List[BaseMessage] messages, Optional[List[str]] stop=None, Optional[CallbackManagerForLLMRun] run_manager=None, **Any kwargs)
 
Iterator[ChatGenerationChunk] _stream (self, List[BaseMessage] messages, Optional[List[str]] stop=None, Optional[CallbackManagerForLLMRun] run_manager=None, **Any kwargs)
 
- Protected Member Functions inherited from llama_ros.langchain.llama_ros_common.LlamaROSCommon
GenerateResponse.Result _create_action_goal (self, str prompt, Optional[List[str]] stop=None, Optional[str] image_url=None, Optional[np.ndarray] image=None, Optional[str] tools_grammar=None, **kwargs)
 

Member Function Documentation

◆ _convert_content()

List[Dict[str, str]] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._convert_content ( self,
Union[Dict[str, str], str, List[str], List[Dict[str, str]]] content )
protected

◆ _convert_message_to_dict()

list[dict[str, str]] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._convert_message_to_dict ( self,
BaseMessage message )
protected

◆ _create_chat_generations()

List[BaseMessage] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._create_chat_generations ( self,
GenerateResponse.Result response,
str method )
protected

◆ _default_params()

Dict[str, Any] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._default_params ( self)
protected

◆ _extract_data_from_messages()

Tuple[Dict[str, str], Optional[str], Optional[str]] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._extract_data_from_messages ( self,
List[BaseMessage] messages )
protected

◆ _generate()

str llama_ros.langchain.chat_llama_ros.ChatLlamaROS._generate ( self,
List[BaseMessage] messages,
Optional[List[str]] stop = None,
Optional[CallbackManagerForLLMRun] run_manager = None,
**Any kwargs )
protected

◆ _generate_prompt()

str llama_ros.langchain.chat_llama_ros.ChatLlamaROS._generate_prompt ( self,
List[dict[str, str]] messages,
**kwargs )
protected

◆ _llm_type()

str llama_ros.langchain.chat_llama_ros.ChatLlamaROS._llm_type ( self)
protected

◆ _stream()

Iterator[ChatGenerationChunk] llama_ros.langchain.chat_llama_ros.ChatLlamaROS._stream ( self,
List[BaseMessage] messages,
Optional[List[str]] stop = None,
Optional[CallbackManagerForLLMRun] run_manager = None,
**Any kwargs )
protected
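
_stream() is not called directly; it backs the standard LangChain streaming interface. A hedged sketch, assuming a running llama_ros node:

from llama_ros.langchain import ChatLlamaROS

chat = ChatLlamaROS()

# .stream() yields message chunks produced internally by _stream().
for chunk in chat.stream("Write a haiku about robots."):
    print(chunk.content, end="", flush=True)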

◆ bind_tools()

Runnable[LanguageModelInput, BaseMessage] llama_ros.langchain.chat_llama_ros.ChatLlamaROS.bind_tools ( self,
Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]] tools,
*,
Optional[Union[dict, str, Literal["auto", "all", "one", "any"], bool]] tool_choice = "auto",
Literal["function_calling", "json_schema", "json_mode"] method = "function_calling",
**Any kwargs )
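
A hedged usage sketch for bind_tools(); get_weather is a made-up example tool, and "any" is simply one of the tool_choice literals accepted above:

from langchain_core.tools import tool
from llama_ros.langchain import ChatLlamaROS

@tool
def get_weather(city: str) -> str:
    """Return the current weather for a city."""
    return f"Sunny in {city}"

chat = ChatLlamaROS(temp=0.0)
chat_with_tools = chat.bind_tools([get_weather], tool_choice="any")

response = chat_with_tools.invoke("What is the weather in Madrid?")
print(response.tool_calls)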

◆ with_structured_output()

Runnable[LanguageModelInput, Union[Dict, BaseModel]] llama_ros.langchain.chat_llama_ros.ChatLlamaROS.with_structured_output ( self,
Optional[Union[Dict, Type[BaseModel], Type]] schema = None,
*,
bool include_raw = False,
Literal["function_calling", "json_schema", "json_mode"] method = "function_calling",
**Any kwargs )
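
A hedged sketch of with_structured_output() with a Pydantic schema; CityInfo is a made-up model, and the documented default method="function_calling" is used implicitly:

from pydantic import BaseModel, Field

from llama_ros.langchain import ChatLlamaROS

class CityInfo(BaseModel):
    """Basic facts about a city."""
    name: str = Field(description="City name")
    country: str = Field(description="Country the city belongs to")

chat = ChatLlamaROS(temp=0.0)
structured = chat.with_structured_output(CityInfo)

result = structured.invoke("Tell me about Paris.")
print(result)  # e.g. CityInfo(name='Paris', country='France')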

Member Data Documentation

◆ jinja_env

ImmutableSandboxedEnvironment llama_ros.langchain.chat_llama_ros.ChatLlamaROS.jinja_env
static
Initial value:
= ImmutableSandboxedEnvironment(
loader=jinja2.BaseLoader(),
trim_blocks=True,
lstrip_blocks=True,
)
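
Illustrative only: an ImmutableSandboxedEnvironment configured this way renders chat templates while blocking unsafe attribute access. The template string below is a toy stand-in, not the template shipped with any particular model:

import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment

env = ImmutableSandboxedEnvironment(
    loader=jinja2.BaseLoader(),
    trim_blocks=True,
    lstrip_blocks=True,
)

template = env.from_string(
    "{% for m in messages %}<|{{ m.role }}|>{{ m.content }}\n{% endfor %}"
)
print(template.render(messages=[{"role": "user", "content": "Hi"}]))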

◆ use_default_template

bool llama_ros.langchain.chat_llama_ros.ChatLlamaROS.use_default_template = False
static

◆ use_gguf_template

bool llama_ros.langchain.chat_llama_ros.ChatLlamaROS.use_gguf_template = True
static

The documentation for this class was generated from the following file: llama_ros/langchain/chat_llama_ros.py