# `BeautifulSoup` parses web page content: locate and extract what you need by tag, class name, ID, and so on
import bs4
# Load HTML pages with `urllib` and parse them with `BeautifulSoup`
from langchain_community.document_loaders import WebBaseLoader
# Text splitting
from langchain_text_splitters import RecursiveCharacterTextSplitter
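The loading and splitting step itself is not shown in this excerpt. A minimal sketch, assuming the Vue.js introduction page as the source (the URL and the splitter parameters are assumptions, chosen to match the "what is vue?" question below):

# Assumption: the source page is the Vue.js introduction guide,
# consistent with the Vue question asked later in this post.
loader = WebBaseLoader('https://vuejs.org/guide/introduction.html')
docs = loader.load()

# Split into overlapping chunks; the sizes here are illustrative defaults.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(docs)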
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate(
    input_variables=['context', 'question'],
    template='''You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say you don't know, without any explanation.
Question: {question}
Context: {context}
Answer:''',
)
Implementing retrieval-based question answering with LangChain
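The chain below assumes a vectorstore and an llm already exist. A minimal sketch of that setup, using the same Chroma + nomic-embed-text combination as the helper function later in this post (the llama2 model name is an assumption; any locally pulled Ollama model works):

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings
from langchain_community.llms import Ollama

# Embed the splits with the nomic-embed-text model served by Ollama.
vectorstore = Chroma.from_documents(documents=splits, embedding=OllamaEmbeddings(model='nomic-embed-text'))

# Assumption: 'llama2' stands in for whichever model you have pulled locally.
llm = Ollama(model='llama2')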
from langchain.chains import RetrievalQA

# Vector store retriever
retriever = vectorstore.as_retriever()
qa_chain = RetrievalQA.from_chain_type(
    llm,
    retriever=retriever,
    chain_type_kwargs={'prompt': prompt},
)

# what is Composition API?
question = 'what is vue?'
result = qa_chain.invoke({'query': question})
# output
# I think I know this one! Based on the context,
# Vue is a JavaScript framework for building user interfaces
# that builds on top of standard HTML, CSS, and JavaScript.
# It provides a declarative way to use Vue primarily in
# low-complexity scenarios or for building full applications with
# Composition API + Single-File Components.
What does it answer if I ask a question unrelated to the document?
question = 'what is react?'
result = qa_chain.invoke({'query': question})
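Given the prompt above, the chain should simply reply that it doesn't know: React is not covered by the retrieved Vue context, and the template instructs the model to say so without any explanation.

Finally, the same pieces can be assembled into a small Gradio web app.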
import gradio as gr
from langchain_community.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_community.document_loaders import WebBaseLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_community.embeddings import OllamaEmbeddings
from langchain.chains import RetrievalQA
from langchain_core.prompts import PromptTemplate
def chroma_retriever_store_content(splits):
    # Run the embedding model via Ollama. nomic-embed-text: a high-performing
    # open embedding model with a large token context window.
    vectorstore = Chroma.from_documents(documents=splits, embedding=OllamaEmbeddings(model='nomic-embed-text'))
    return vectorstore.as_retriever()
def rag_prompt():
    return PromptTemplate(
        input_variables=['context', 'question'],
        template='''You are an assistant for question-answering tasks.
Use the following pieces of retrieved context to answer the question.
If you don't know the answer, just say you don't know, without any explanation.
Question: {question}
Context: {context}
Answer:''',
    )
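The rest of the app wiring is not shown in this excerpt. Below is a minimal sketch of how these helpers might be connected to a Gradio interface; load_web_content, answer_question, and the llama2 model name are assumptions for illustration, not part of the original:

def load_web_content(url):
    # Hypothetical helper: load the page and split it into chunks.
    docs = WebBaseLoader(url).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    return splitter.split_documents(docs)

def answer_question(url, question):
    # Hypothetical helper: build the chain for this request and answer.
    retriever = chroma_retriever_store_content(load_web_content(url))
    llm = Ollama(
        model='llama2',  # assumption: any locally pulled Ollama model
        callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
    )
    qa_chain = RetrievalQA.from_chain_type(
        llm,
        retriever=retriever,
        chain_type_kwargs={'prompt': rag_prompt()},
    )
    return qa_chain.invoke({'query': question})['result']

demo = gr.Interface(
    fn=answer_question,
    inputs=[gr.Textbox(label='URL'), gr.Textbox(label='Question')],
    outputs=gr.Textbox(label='Answer'),
)
demo.launch()

Rebuilding the vector store on every request keeps the sketch simple; a real app would cache the retriever per URL rather than re-embedding the page for each question.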