Update app.py
app.py CHANGED
@@ -57,11 +57,15 @@ def get_pipeline(model_name):
     tokenizer = AutoTokenizer.from_pretrained(local_path)
     model = AutoModelForCausalLM.from_pretrained(local_path)

+    # Fix the missing pad_token issue
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
     generator = pipeline(
         "text-generation",
         model=model,
         tokenizer=tokenizer,
-        device
+        device=-1  # force CPU
     )
     _loaded_pipelines[model_name] = generator
     return _loaded_pipelines[model_name]
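This hunk does two things: it falls back to the EOS token when the tokenizer has no pad token (GPT-2-family tokenizers such as DistilGPT2 ship without one), and it pins the pipeline to CPU with device=-1 to match the Space's free CPU hardware. A minimal sketch of the patched loading path, assuming "distilgpt2" as an illustrative stand-in for the app's local_path:

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

local_path = "distilgpt2"  # illustrative; the app resolves local_path per model_name
tokenizer = AutoTokenizer.from_pretrained(local_path)
model = AutoModelForCausalLM.from_pretrained(local_path)

# GPT-2-family tokenizers define no pad token; reuse EOS so padded generation works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=-1,  # -1 keeps the pipeline on CPU
)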
@@ -70,10 +74,11 @@ def call_local_inference(model_name, prompt, max_new_tokens=256):
     try:
         generator = get_pipeline(model_name)
         outputs = generator(
-            prompt,
-            max_new_tokens=max_new_tokens,
-            do_sample=True,
-            temperature=0.7
+            prompt,
+            max_new_tokens=max_new_tokens,
+            do_sample=True,
+            temperature=0.7,
+            pad_token_id=generator.tokenizer.pad_token_id
         )
         return outputs[0]["generated_text"]
     except Exception as e:
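The generation call now passes pad_token_id explicitly, so the pipeline pads with the token configured above rather than emitting the usual pad_token_id notice on every call. A usage sketch, reusing the generator from the snippet above with an illustrative prompt:

outputs = generator(
    "Write a short article about mindfulness.",  # illustrative prompt
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    pad_token_id=generator.tokenizer.pad_token_id,
)
print(outputs[0]["generated_text"])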
@@ -115,7 +120,7 @@ def generate_article_progress(query, model_name, segments=5):
     # 5. Gradio interface
     # -------------------------------
     with gr.Blocks() as demo:
-        gr.Markdown("#
+        gr.Markdown("# 📺 電視弘法視頻生成文章RAG系統(CPU免費版))")
         gr.Markdown("支援 DistilGPT2 / BTLM-3B / BART-Base,Auto 模式會自動選擇。")

         query_input = gr.Textbox(lines=2, placeholder="請輸入文章主題", label="文章主題")
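The interface hunk restores the page title in the header Markdown. For orientation, a self-contained sketch of how this Blocks section can be wired end to end; the button, the output box, and the placeholder callback are illustrative assumptions, not the app's actual generate_article_progress handler:

import gradio as gr

def generate_article(query):
    # placeholder callback; the real app calls generate_article_progress
    return f"Draft article about: {query}"

with gr.Blocks() as demo:
    gr.Markdown("# 📺 電視弘法視頻生成文章RAG系統(CPU免費版)")
    gr.Markdown("支援 DistilGPT2 / BTLM-3B / BART-Base,Auto 模式會自動選擇。")
    query_input = gr.Textbox(lines=2, placeholder="請輸入文章主題", label="文章主題")
    output_box = gr.Textbox(lines=10, label="Generated article")
    run_btn = gr.Button("Generate")
    run_btn.click(generate_article, inputs=query_input, outputs=output_box)

demo.launch()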