from langchain_ollama import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain_core.documents import Document
import os
import pandas as pd
# Load the CSV and embed each row into a persistent Chroma vector store.
df = pd.read_csv("1.csv")
embeddings = OllamaEmbeddings(model="mxbai-embed-large")

db_location = "./chrome_langchain_db"
add_documents = not os.path.exists(db_location)

if add_documents:
    # First run: turn each CSV row into a Document and build the store.
    documents = []
    ids = []
    for i, row in df.iterrows():
        documents.append(Document(page_content=str(row.to_dict())))
        ids.append(str(i))
    # With persist_directory set, langchain_chroma persists automatically;
    # the old db.persist() call is not available on this class.
    db = Chroma.from_documents(documents, embeddings, ids=ids, persist_directory=db_location)
else:
    # Subsequent runs: reopen the existing store instead of re-embedding.
    db = Chroma(persist_directory=db_location, embedding_function=embeddings)

retriever = db.as_retriever()
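By default the retriever returns roughly the top 4 most similar rows. If you want more (or less) context stuffed into the prompt, you can pass search_kwargs when creating it. A minimal sketch, assuming the same db object built above; the value k=5 and the sample query are only illustrative:

# Optional: control how many rows are retrieved per question.
retriever = db.as_retriever(search_kwargs={"k": 5})

# Quick sanity check that retrieval works before wiring up the LLM.
print(retriever.invoke("Which projects slipped their deadlines?")[:1])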
2. Building the Q&A Agent
from langchain_ollama.llms import OllamaLLM
from langchain.prompts import ChatPromptTemplate
model = OllamaLLM(model="deepseek-r1:8b")

template = """You are an expert in answering questions about project management.
Here are some relevant reviews: {reviews}
Please answer the question: {question}"""

prompt = ChatPromptTemplate.from_template(template)
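Instead of formatting the prompt by hand inside the loop, the prompt and model can also be composed into a single runnable with LangChain's pipe syntax. A short sketch; sample_reviews and the question are placeholder values, not data from the CSV:

# Compose prompt and model into one runnable (LCEL pipe syntax).
chain = prompt | model

# Invoke with the template variables filled in.
sample_reviews = "Project Alpha shipped two weeks late due to scope creep."
print(chain.invoke({"reviews": sample_reviews, "question": "Why was Project Alpha late?"}))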
while True:
    print("~~~~~~~~~~~~~~~~~~~~~~~~~~")
    question = input("Enter your question (or type 'q' to stop): ")
    if question.strip().lower() == 'q':
        break
    # Retrieve the most relevant rows and stuff them into the prompt.
    docs = retriever.invoke(question)
    reviews = "\n".join(doc.page_content for doc in docs)
    result = prompt.format(reviews=reviews, question=question)
    answer = model.invoke(result)
    print("Answer:", answer)
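deepseek-r1 models typically prefix their answers with a <think>...</think> reasoning block. If you only want the final answer printed, a small post-processing step can remove it. This is a sketch and not part of the original code; the strip_think helper name is illustrative:

import re

def strip_think(text: str) -> str:
    # Drop the <think>...</think> block that deepseek-r1 emits before the answer.
    return re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL).strip()

# Inside the loop, print the cleaned answer instead of the raw model output.
print("Answer:", strip_think(answer))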