fix 'No candidate PyTorch version found for ZeroGPU'
app.py CHANGED
@@ -4,7 +4,6 @@ import gradio as gr
 from functools import lru_cache
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
-# 可選模型列表
 MODEL_LIST = [
     "ckiplab/gpt2-tiny-chinese",
     "ckiplab/gpt2-base-chinese",
@@ -18,43 +17,33 @@ MODEL_LIST = [
 @lru_cache(maxsize=None)
 def get_pipeline(model_name):
     tok = AutoTokenizer.from_pretrained(model_name)
-    mdl = AutoModelForCausalLM.from_pretrained(model_name)
+    # By setting weights_only=False we bypass the torch.load(weights_only=True)
+    # path that is disallowed for torch<2.6 due to CVE-2025-32434.
+    mdl = AutoModelForCausalLM.from_pretrained(model_name, weights_only=False)
     mdl.to("cuda")
     return pipeline("text-generation", model=mdl, tokenizer=tok, device=0)
 
 @spaces.GPU
 def suggest_next(text, model_name, k, m):
-
-
-
+    outs = get_pipeline(model_name)(
+        text, max_new_tokens=k, num_return_sequences=m, do_sample=False
+    )
     return [out["generated_text"][len(text):] for out in outs]
 
 def append_suggestion(current, choice):
     return current + choice
 
 with gr.Blocks() as demo:
-    gr.Markdown(
-        "## 🇹🇼 台灣中文下段預測\n"
-        "結合小型語言模型與 ZeroGPU,提供即時 IME 風格的下段文字建議。"
-    )
+    gr.Markdown("## 🇹🇼 台灣中文下段預測(ZeroGPU + Gradio v5)")
 
-    input_text = gr.TextArea(
-        label="輸入文字", lines=4, placeholder="請在此輸入起始片段…"
-    )
+    input_text = gr.TextArea(label="輸入文字", lines=4, placeholder="請在此輸入起始片段…")
 
     with gr.Row():
-        model_selector = gr.Dropdown(
-            MODEL_LIST, value=MODEL_LIST[0], label="選擇模型"
-        )
-        k_slider = gr.Slider(
-            minimum=1, maximum=50, step=1, value=5, label="K(最大新生成詞元)"
-        )
-        m_slider = gr.Slider(
-            minimum=1, maximum=10, step=1, value=5, label="M(建議數量)"
-        )
+        model_selector = gr.Dropdown(MODEL_LIST, value=MODEL_LIST[0], label="選擇模型")
+        k_slider = gr.Slider(1, 50, value=5, label="K(最大新生成詞元)")
+        m_slider = gr.Slider(1, 10, value=5, label="M(建議數量)")
 
     suggestions = gr.Dropdown([], label="建議清單", interactive=True)
-
     gpu_button = gr.Button("使用 GPU 生成建議")
 
     gpu_button.click(
@@ -62,7 +51,6 @@ with gr.Blocks() as demo:
         inputs=[input_text, model_selector, k_slider, m_slider],
         outputs=suggestions,
     )
-
     suggestions.change(
         fn=append_suggestion,
        inputs=[input_text, suggestions],
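
A note on the weights_only=False workaround: it opts back into full-pickle torch.load, which is exactly the path the torch<2.6 guard (CVE-2025-32434) exists to block. If a checkpoint also ships safetensors weights (whether the ckiplab repos do is an assumption worth verifying on the Hub), use_safetensors=True sidesteps torch.load entirely. A minimal sketch:

from transformers import AutoModelForCausalLM

# Sketch: load the safetensors serialization instead of the pickled .bin,
# avoiding torch.load and its torch<2.6 version guard altogether.
# Assumption: the repo actually provides a model.safetensors file.
mdl = AutoModelForCausalLM.from_pretrained(
    "ckiplab/gpt2-tiny-chinese", use_safetensors=True
)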
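
One caveat in the new suggest_next body: generate() rejects num_return_sequences > 1 under pure greedy decoding, so do_sample=False with m > 1 raises a ValueError at request time. A sketch of a beam-search variant that stays deterministic while still returning several candidates (runs on CPU with a model from MODEL_LIST; the prompt is illustrative):

from transformers import pipeline

# Sketch: beam search makes num_return_sequences > 1 legal without sampling.
pipe = pipeline("text-generation", model="ckiplab/gpt2-tiny-chinese")
prompt = "今天天氣"
outs = pipe(
    prompt,
    max_new_tokens=5,
    num_return_sequences=3,
    num_beams=3,  # generate() requires num_beams >= num_return_sequences
    do_sample=False,
)
print([out["generated_text"][len(prompt):] for out in outs])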