llama_ros: llama.cpp for ROS 2
Loading...
Searching...
No Matches
llama_demos.llama_rag_demo_node Namespace Reference

Functions

 format_docs (docs)
 

Variables

 loader
 
 docs = loader.load()
 
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
 
 splits = text_splitter.split_documents(docs)
 
 vectorstore = Chroma.from_documents(documents=splits, embedding=LlamaROSEmbeddings())
 
 retriever = vectorstore.as_retriever(search_kwargs={"k": 20})
 
 prompt
 
 compressor = LlamaROSReranker(top_n=3)
 
 compression_retriever
 
tuple rag_chain
 
 c
 
 flush
 
 True
 
 end
 

Function Documentation

◆ format_docs()

llama_demos.llama_rag_demo_node.format_docs(docs)

Variable Documentation

◆ c

llama_demos.llama_rag_demo_node.c

◆ compression_retriever

llama_demos.llama_rag_demo_node.compression_retriever
Initial value:
= ContextualCompressionRetriever(
    base_compressor=compressor, base_retriever=retriever
)

◆ compressor

llama_demos.llama_rag_demo_node.compressor = LlamaROSReranker(top_n=3)

◆ docs

llama_demos.llama_rag_demo_node.docs = loader.load()

◆ end

llama_demos.llama_rag_demo_node.end

◆ flush

llama_demos.llama_rag_demo_node.flush

◆ loader

llama_demos.llama_rag_demo_node.loader
Initial value:
= WebBaseLoader(
    web_paths=("https://lilianweng.github.io/posts/2023-06-23-agent/",),
    bs_kwargs=dict(
        parse_only=bs4.SoupStrainer(class_=("post-content", "post-title", "post-header"))
    ),
)

◆ prompt

llama_demos.llama_rag_demo_node.prompt
Initial value:
= ChatPromptTemplate.from_messages(
    [
        SystemMessage("You are an AI assistant that answer questions briefly."),
        HumanMessagePromptTemplate.from_template(
            "Taking into account the following information:{context}\n\n{question}"
        ),
    ]
)

◆ rag_chain

tuple llama_demos.llama_rag_demo_node.rag_chain
Initial value:
= (
    {"context": compression_retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | ChatLlamaROS(temp=0.0)
    | StrOutputParser()
)

◆ retriever

llama_demos.llama_rag_demo_node.retriever = vectorstore.as_retriever(search_kwargs={"k": 20})

◆ splits

llama_demos.llama_rag_demo_node.splits = text_splitter.split_documents(docs)

◆ text_splitter

llama_demos.llama_rag_demo_node.text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)

◆ True

llama_demos.llama_rag_demo_node.True

◆ vectorstore

llama_demos.llama_rag_demo_node.vectorstore = Chroma.from_documents(documents=splits, embedding=LlamaROSEmbeddings())