Update app.py
app.py CHANGED
@@ -1,19 +1,25 @@
-import os
-
-
+import os
+import gradio as gr
+import torch
+import logging
+
+from langchain_community.embeddings import HuggingFaceEmbeddings
+from langchain_community.vectorstores import Chroma
 from langchain_community.llms import HuggingFacePipeline
 from langchain.chains import RetrievalQA
+from langchain.prompts import PromptTemplate
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 logging.basicConfig(level=logging.INFO)
 
+# ─── Configuration ─────────────────────────────────────────────
 VECTOR_STORE_DIR = "./vector_store"
 MODEL_NAME = "uer/gpt2-chinese-cluecorpussmall"
 EMBEDDING_MODEL_NAME = "sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
 
-# ─── 1. Load the LLM
+# ─── 1. Load the LLM ────────────────────────────────────────────
 print("🔧 加载生成模型…")
-tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
 model = AutoModelForCausalLM.from_pretrained(
     MODEL_NAME,
     torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
@@ -29,43 +35,71 @@ gen_pipe = pipeline(
     do_sample=True,
 )
 llm = HuggingFacePipeline(pipeline=gen_pipe)
+print("✅ 生成模型加载成功。")
 
-# ─── 2. Load the vector store
+# ─── 2. Load the vector store ─────────────────────────────────
 print("📚 加载向量库…")
 embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
 vectordb = Chroma(persist_directory=VECTOR_STORE_DIR, embedding_function=embeddings)
-
-# ─── 3. Build the RAG QA chain ───
 retriever = vectordb.as_retriever(search_kwargs={"k": 3})
+print("✅ 向量库加载成功。")
+
+# ─── 3. Custom prompt ─────────────────────────────────────────
+prompt_template = PromptTemplate.from_template(
+    """你是一位专业的数学助教,请根据以下参考资料回答用户的问题。
+如果资料中没有相关内容,请直接回答“我不知道”或“资料中未提及”,不要编造答案。
+
+参考资料:
+{context}
+
+用户问题:
+{question}
+
+回答(只允许基于参考资料,不要编造):
+"""
+)
+
+# ─── 4. Build the RAG QA chain ────────────────────────────────
 qa_chain = RetrievalQA.from_chain_type(
     llm=llm,
     chain_type="stuff",
     retriever=retriever,
+    chain_type_kwargs={"prompt": prompt_template},
     return_source_documents=True,
 )
+print("✅ RAG 问答链构建成功。")
 
-# ───
+# ─── 5. Core QA function ──────────────────────────────────────
 def qa_fn(query: str):
     if not query.strip():
         return "❌ 请输入问题内容。"
+    # Run retrieval and answer generation
     result = qa_chain({"query": query})
-    answer = result["result"]
+    answer = result["result"].strip()
     sources = result.get("source_documents", [])
+    if not sources:
+        return "📌 回答:未在知识库中找到相关内容,请尝试更换问题或补充教材。"
+    # Join the retrieved reference snippets
     sources_text = "\n\n".join(
         [f"【片段 {i+1}】\n{doc.page_content}" for i, doc in enumerate(sources)]
     )
-    return f"📌 回答:{answer
+    return f"📌 回答:{answer}\n\n📚 参考:\n{sources_text}"
 
-# ───
-with gr.Blocks(title="
-    gr.Markdown("## 📘
+# ─── 6. Gradio UI ─────────────────────────────────────────────
+with gr.Blocks(title="智能学习助手") as demo:
+    gr.Markdown("## 📘 智能学习助手\n输入教材相关问题,例如:“什么是函数的定义域?”")
     with gr.Row():
         query = gr.Textbox(label="问题", placeholder="请输入你的问题", lines=2)
-    answer = gr.Textbox(label="回答", lines=
-    gr.Button("提问").click(qa_fn, inputs=query, outputs=answer)
-    gr.Markdown(
+    answer = gr.Textbox(label="回答", lines=12)
+    gr.Button("提问").click(fn=qa_fn, inputs=query, outputs=answer)
+    gr.Markdown(
+        "---\n"
+        "模型:UER/GPT2-Chinese-ClueCorpus + Sentence-Transformers RAG \n"
+        "由 Hugging Face Spaces 提供算力支持"
+    )
 
 if __name__ == "__main__":
     demo.launch()
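The diff leaves lines 26–34 of the old file untouched, so the text-generation pipeline is visible only through the hunk header (`gen_pipe = pipeline(`) and the `do_sample=True,` context line. For readers without the full file, a minimal sketch of what such a setup typically looks like; everything except `gen_pipe`, `model`, `tokenizer`, and `do_sample=True` is an assumption, not the file's actual arguments:

# Hypothetical sketch of the elided pipeline setup; the real
# arguments on lines 26-34 are not shown in this diff.
gen_pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=256,  # assumed
    temperature=0.7,     # assumed
    do_sample=True,
)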
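Note that app.py only reads the Chroma collection; nothing in this commit creates `./vector_store`. A minimal ingestion sketch, assuming a hypothetical UTF-8 source file `textbook.txt` and the same embedding model, could look like this:

# build_vector_store.py — hypothetical companion script, not part of this commit.
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter

docs = TextLoader("textbook.txt", encoding="utf-8").load()  # assumed source file
chunks = RecursiveCharacterTextSplitter(
    chunk_size=500, chunk_overlap=50  # assumed chunking parameters
).split_documents(docs)

embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
)
# Persists the collection to ./vector_store for app.py to load.
Chroma.from_documents(chunks, embeddings, persist_directory="./vector_store")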
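Because the retriever is built with `k=3`, retrieval quality can be checked independently of generation. A quick sketch (the query string is illustrative):

# Fetch the top-3 chunks for a query without invoking the LLM.
docs = retriever.get_relevant_documents("函数的定义域")  # "domain of a function"
for i, doc in enumerate(docs, 1):
    print(f"[{i}] {doc.page_content[:80]}")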
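`PromptTemplate.from_template` infers the `{context}` and `{question}` input variables from the template string, so the exact prompt the "stuff" chain will send to the model can be previewed directly (the sample values below are illustrative):

# Preview the fully rendered prompt.
print(prompt_template.format(
    context="函数的定义域是使函数有意义的自变量取值范围。",  # sample retrieved snippet
    question="什么是函数的定义域?",
))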
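One caveat on the updated `qa_fn`: `qa_chain({"query": query})` goes through `Chain.__call__`, which LangChain deprecates from 0.1 onward in favour of the Runnable interface. A drop-in replacement for that line would be:

# Equivalent call via the non-deprecated Runnable interface (LangChain >= 0.1).
result = qa_chain.invoke({"query": query})
answer = result["result"].strip()
sources = result.get("source_documents", [])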