import gradio as gr
from huggingface_hub import InferenceClient
import json
from datetime import datetime
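
# Gradio chat demo: a language-teacher persona ("山田優子", Yamada Yuko) served through the
# Hugging Face Inference API. The user pastes an API token in the sidebar; replies are
# streamed into the output textbox and every exchange is appended to a local JSONL chat log.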


def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    hf_token: str,
):
    """
    Stream a reply from the model as the Yamada Yuko persona and update the chat history.

    For more information on `huggingface_hub` Inference API support, please check the docs:
    https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
    """
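    # Note on streaming: with `stream=True`, `client.chat_completion(...)` yields incremental
    # chunks whose new text is in `chunk.choices[0].delta.content` (occasionally None), so the
    # loops below concatenate these deltas into a growing response string. This matches current
    # `huggingface_hub` behaviour; check the docs linked above if your client version differs.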
    try:
        client = InferenceClient(token=hf_token, model="openai/gpt-oss-20b")

        # Keep only the most recent turns so the prompt stays small.
        max_history_length = 5
        history = history[-max_history_length:]

        # Treat the message as a writing/composition task if it contains any of these
        # Chinese keywords (essay, writing, article, reading, poetry, expand, lengthen,
        # "write a", story, describe).
        writing_keywords = ["作文", "寫作", "文章", "閱讀", "詩詞", "擴展", "增長", "寫一篇", "故事", "描述"]
        is_writing_task = any(keyword in message.lower() for keyword in writing_keywords)
        if is_writing_task:
            # Append an extra Chinese instruction: answer as the language teacher Yamada Yuko,
            # give literary or teaching advice, quote poetry or famous lines (Du Fu, Natsume
            # Soseki), and stay gentle but strict, using humour to defuse awkwardness.
            system_message += "\n特別提示:用戶提到語文相關話題,請以山田優子的語文教師身份,提供文學化或教學建議,並適當引用詩詞或名言(如杜甫的‘無邊落木蕭蕭下’或夏目漱石的作品)。保持溫柔但嚴格的語氣,鼓勵學生探索文字之美,並融入幽默來化解尷尬。"

        # Detect Japanese input, either by keyword or by any Hiragana/Katakana character
        # (U+3040 through U+30FF).
        japanese_keywords = ["こんにちは", "日本", "文化", "夏目漱石", "作文を書"]
        is_japanese = any(keyword in message for keyword in japanese_keywords) or any(
            0x3040 <= ord(c) <= 0x30FF for c in message
        )
        if is_japanese:
            # Extra Chinese instruction: reply partly in Japanese, e.g. greetings or quotes
            # from Japanese literature such as Natsume Soseki.
            system_message += "\n特別提示:用戶提到日文或日本文化,請適當使用日文回應,例如問候或引用日本文學(如夏目漱石)。"

        # For writing tasks the model is called repeatedly until the combined output reaches
        # roughly `target_length` characters; each pass continues from the previous segment.
        responses = []
        target_length = 2000
        current_length = 0
        continuation_prompt = message

        if is_writing_task:
            while current_length < target_length:
                messages = [{"role": "system", "content": system_message}]
                messages.extend(history)
                messages.append({"role": "user", "content": continuation_prompt})

                # Text from earlier passes, shown ahead of the currently streaming segment.
                prefix = "\n\n".join(responses)
                response = ""
                try:
                    # The loop variable must not shadow the `message` argument,
                    # which is still needed for logging after the loop.
                    for chunk in client.chat_completion(
                        messages,
                        max_tokens=max_tokens,
                        stream=True,
                        temperature=temperature,
                        top_p=top_p,
                    ):
                        choices = chunk.choices
                        token = choices[0].delta.content if len(choices) and choices[0].delta.content else ""
                        response += token
                        yield (prefix + "\n\n" + response if prefix else response), history
                except Exception as e:
                    yield f"生成過程中發生錯誤:{str(e)}。請檢查 Hugging Face API token 或模型連線。", history
                    return

                if not response.strip():
                    # The model produced nothing; stop instead of looping forever.
                    break

                responses.append(response)
                current_length += len(response)
                history.append({"role": "user", "content": continuation_prompt})
                history.append({"role": "assistant", "content": response})

                # Ask the model to continue expanding from the tail of the previous segment,
                # keeping the persona and the overall character target.
                continuation_prompt = f"請繼續擴展以下內容,保持山田優子的語文教師風格,目標總字數達{target_length}字:\n{response[-500:] if len(response) > 500 else response}"

                # Shrink the token budget for the final pass so the total length lands
                # near the target (never below a 50-token floor).
                if current_length >= target_length - max_tokens:
                    max_tokens = max(target_length - current_length + 100, 50)

            final_response = "\n\n".join(responses)
        else:
            # Ordinary question: a single streaming call.
            messages = [{"role": "system", "content": system_message}]
            messages.extend(history)
            messages.append({"role": "user", "content": message})

            final_response = ""
            for chunk in client.chat_completion(
                messages,
                max_tokens=max_tokens,
                stream=True,
                temperature=temperature,
                top_p=top_p,
            ):
                choices = chunk.choices
                token = choices[0].delta.content if len(choices) and choices[0].delta.content else ""
                final_response += token
                yield final_response, history

            history.append({"role": "user", "content": message})
            history.append({"role": "assistant", "content": final_response})

        # Append the exchange to a local JSONL log (one JSON object per line).
        log_entry = {
            "user_message": message,
            "bot_response": final_response,
            "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        }
        with open("chat_log.json", "a", encoding="utf-8") as f:
            json.dump(log_entry, f, ensure_ascii=False)
            f.write("\n")
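        # Illustrative log line only (values made up for the example):
        # {"user_message": "寫一篇關於秋天的文章", "bot_response": "...", "timestamp": "2025-01-01 12:00:00"}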

        # Final yield so the complete text and the updated history reach the UI.
        yield final_response, history

    except Exception as e:
        yield f"抱歉,山田優子遇到了一些技術問題:{str(e)}。請檢查你的 Hugging Face API token、網路連線,或確認模型 'openai/gpt-oss-20b' 可用。", history


with gr.Blocks() as demo:
    with gr.Sidebar():
        # "Enter a Hugging Face API token or log in."
        gr.Markdown("請輸入 Hugging Face API token 或登錄")
        hf_token = gr.Textbox(label="Hugging Face API Token", type="password")

        # Promotional note (Chinese): suggests the Grok mobile app's voice mode
        # for hearing the persona speak.
        gr.Markdown("📢 想聽山田優子用溫柔的語氣教你語文?請下載 Grok iOS 或 Android 應用程式,開啟語音模式!")

    # Input area; the placeholder invites a question or short text to expand to 2000+ characters.
    input_text = gr.Textbox(
        placeholder="請輸入你的問題或短文(例如‘寫一篇關於秋天的文章’),山田優子將為你擴展至2000字以上!",
        lines=10,
        max_lines=50,
        label="輸入區",
    )
    output_text = gr.Textbox(label="山田優子的回應", lines=20)  # "Yamada Yuko's reply"
    # Default persona prompt (Chinese): Yamada Yuko, a gentle but strict language teacher who
    # uses literary language, quotes poetry, and mixes in Japanese when the user does.
    system_message = gr.Textbox(
        value="你是一位名叫山田優子的語文教師,擁有黑色低馬尾髮型,身高175公分,體重60-70公斤。你溫柔但對學生要求嚴格,喜歡用文學化的語言表達,偶爾會引用詩詞或幽默的語句來化解尷尬。你的教學風格充滿同理心,鼓勵學生探索文字之美。如果用戶使用日文或提到日本文化,你會適當融入日文回應,例如問候或引用日本文學(如夏目漱石的句子)。",
        label="System message",
    )
    max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens")
    temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
    top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")

    submit_button = gr.Button("提交")  # "Submit"

    # Per-session chat history; `respond` yields (text, history) pairs, so both the
    # output textbox and this state are updated on every streamed chunk.
    history = gr.State([])

    submit_button.click(
        fn=respond,
        inputs=[input_text, history, system_message, max_tokens, temperature, top_p, hf_token],
        outputs=[output_text, history],
    )
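
    # Gradio treats a generator event handler as a streaming function: each yielded
    # (text, history) tuple updates the two outputs in place as it arrives.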


if __name__ == "__main__":
    demo.launch()