|
None | __init__ (self, str namespace="llama") |
|
GetMetadata.Response | get_metadata (self, GetMetadata.Request req) |
|
Tokenize.Response | tokenize (self, Tokenize.Request req) |
|
Detokenize.Response | detokenize (self, Detokenize.Request req) |
|
GenerateEmbeddings.Response | generate_embeddings (self, GenerateEmbeddings.Request req) |
|
RerankDocuments.Response | rerank_documents (self, RerankDocuments.Request req) |
|
FormatChatMessages.Response | format_chat_prompt (self, FormatChatMessages.Request req) |
|
Union[ Tuple[GenerateResponse.Result, GoalStatus], Generator[PartialResponse, None, None],] | generate_response (self, GenerateResponse.Goal goal, Callable feedback_cb=None, bool stream=False) |
|
None | cancel_generate_text (self) |
|
◆ __init__()
None llama_ros.llama_client_node.LlamaClientNode.__init__ |
( |
| self, |
|
|
str | namespace = "llama" ) |
◆ _feedback_callback()
None llama_ros.llama_client_node.LlamaClientNode._feedback_callback |
( |
| self, |
|
|
| feedback ) |
|
protected |
◆ _get_result_callback()
None llama_ros.llama_client_node.LlamaClientNode._get_result_callback |
( |
| self, |
|
|
| future ) |
|
protected |
◆ _goal_response_callback()
None llama_ros.llama_client_node.LlamaClientNode._goal_response_callback |
( |
| self, |
|
|
| future ) |
|
protected |
◆ cancel_generate_text()
None llama_ros.llama_client_node.LlamaClientNode.cancel_generate_text |
( |
| self | ) |
|
◆ detokenize()
Detokenize.Response llama_ros.llama_client_node.LlamaClientNode.detokenize |
( |
| self, |
|
|
Detokenize.Request | req ) |
◆ format_chat_prompt()
FormatChatMessages.Response llama_ros.llama_client_node.LlamaClientNode.format_chat_prompt |
( |
| self, |
|
|
FormatChatMessages.Request
| req ) |
◆ generate_embeddings()
GenerateEmbeddings.Response llama_ros.llama_client_node.LlamaClientNode.generate_embeddings |
( |
| self, |
|
|
GenerateEmbeddings.Request
| req ) |
◆ generate_response()
Union[
Tuple[GenerateResponse.Result, GoalStatus],
Generator[PartialResponse, None, None],
] llama_ros.llama_client_node.LlamaClientNode.generate_response |
( |
| self, |
|
|
GenerateResponse.Goal | goal, |
|
|
Callable | feedback_cb = None, |
|
|
bool | stream = False ) |
◆ get_instance()
"LlamaClientNode" llama_ros.llama_client_node.LlamaClientNode.get_instance |
( |
| ) |
|
|
static |
◆ get_metadata()
GetMetadata.Response llama_ros.llama_client_node.LlamaClientNode.get_metadata |
( |
| self, |
|
|
GetMetadata.Request | req ) |
◆ rerank_documents()
RerankDocuments.Response llama_ros.llama_client_node.LlamaClientNode.rerank_documents |
( |
| self, |
|
|
RerankDocuments.Request | req ) |
◆ tokenize()
Tokenize.Response llama_ros.llama_client_node.LlamaClientNode.tokenize |
( |
| self, |
|
|
Tokenize.Request | req ) |
◆ _action_client
ActionClient llama_ros.llama_client_node.LlamaClientNode._action_client = None |
|
static, protected |
◆ _action_done
bool llama_ros.llama_client_node.LlamaClientNode._action_done = False |
|
static, protected |
◆ _action_done_cond
Condition llama_ros.llama_client_node.LlamaClientNode._action_done_cond = Condition() |
|
static, protected |
◆ _action_result
llama_ros.llama_client_node.LlamaClientNode._action_result = None |
|
static, protected |
◆ _action_status
GoalStatus llama_ros.llama_client_node.LlamaClientNode._action_status = GoalStatus.STATUS_UNKNOWN |
|
static, protected |
◆ _callback_group
ReentrantCallbackGroup llama_ros.llama_client_node.LlamaClientNode._callback_group = ReentrantCallbackGroup() |
|
static, protected |
◆ _detokenize_srv_client
llama_ros.llama_client_node.LlamaClientNode._detokenize_srv_client |
|
protected |
Initial value: = self.create_client(
Detokenize, "detokenize", callback_group=self._callback_group
)
◆ _embeddings_srv_client
Client llama_ros.llama_client_node.LlamaClientNode._embeddings_srv_client = None |
|
static, protected |
◆ _executor
MultiThreadedExecutor llama_ros.llama_client_node.LlamaClientNode._executor = None |
|
static, protected |
◆ _format_chat_srv_client
llama_ros.llama_client_node.LlamaClientNode._format_chat_srv_client |
|
protected |
Initial value: = self.create_client(
FormatChatMessages,
"format_chat_prompt",
callback_group=self._callback_group,
)
◆ _get_metadata_srv_client
llama_ros.llama_client_node.LlamaClientNode._get_metadata_srv_client |
|
protected |
Initial value: = self.create_client(
GetMetadata, "get_metadata", callback_group=self._callback_group
)
◆ _get_result_callback
llama_ros.llama_client_node.LlamaClientNode._get_result_callback = self._goal_handle.get_result_async() |
|
protected |
◆ _goal_handle
ClientGoalHandle llama_ros.llama_client_node.LlamaClientNode._goal_handle = None |
|
static, protected |
◆ _goal_handle_lock
RLock llama_ros.llama_client_node.LlamaClientNode._goal_handle_lock = RLock() |
|
static, protected |
◆ _goal_response_callback
llama_ros.llama_client_node.LlamaClientNode._goal_response_callback |
|
protected |
Initial value: = self._action_client.send_goal_async(
goal, feedback_callback=feedback_cb
)
◆ _instance
str llama_ros.llama_client_node.LlamaClientNode._instance = None |
|
static, protected |
◆ _lock
RLock llama_ros.llama_client_node.LlamaClientNode._lock = RLock() |
|
static, protected |
◆ _partial_results
list llama_ros.llama_client_node.LlamaClientNode._partial_results = [] |
|
static, protected |
◆ _rerank_srv_client
llama_ros.llama_client_node.LlamaClientNode._rerank_srv_client |
|
protected |
Initial value: = self.create_client(
RerankDocuments, "rerank_documents", callback_group=self._callback_group
)
◆ _spin_thread
Thread llama_ros.llama_client_node.LlamaClientNode._spin_thread = None |
|
static, protected |
◆ _tokenize_srv_client
Client llama_ros.llama_client_node.LlamaClientNode._tokenize_srv_client = None |
|
static, protected |
The documentation for this class was generated from the following file: