Hugging Face Spaces (Space running on A10G hardware)
Commit: "Update app.py" — file changed: app.py
Summary of the change (from the diff below): replaces the plain text input for
"Hub Model ID" with a `HuggingfaceHubSearch` component and adds its import
(`from gradio_huggingfacehub_search import HuggingfaceHubSearch`).
@@ -9,6 +9,8 @@ from huggingface_hub import snapshot_download
(old side of the diff — context lines only in this hunk)
  9   from huggingface_hub import whoami
 10   from huggingface_hub import ModelCard
 11
 12   from textwrap import dedent
 13
 14   LLAMA_LIKE_ARCHS = ["MistralForCausalLM", "LlamaForCausalLM"]
@@ -141,10 +143,10 @@ def process_model(model_id, q_method, hf_token, private_repo):
(old side of the diff — "-" marks removed lines; the content of removed
lines 144 and 147 was lost in the page extraction and is not recoverable here)
 141   iface = gr.Interface(
 142       fn=process_model,
 143       inputs=[
 144 -         [removed line — content not shown in this extraction]
 145 -         lines=1,
 146           label="Hub Model ID",
 147 -         [removed line — content not shown in this extraction]
 148       ),
 149       gr.Dropdown(
 150           ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
@@ -175,4 +177,4 @@ iface = gr.Interface(
(old side of the diff — "-" marks the removed line; note the extraction shows
identical text for the removed and added forms of this line, so the actual
old wording of line 178 cannot be confirmed from this page)
 175   )
 176
 177   # Launch the interface
 178 - iface.queue(default_concurrency_limit=1, max_size=5).launch(debug=True)
(new side of diff hunk 1 — "+" marks added lines)
  9   from huggingface_hub import whoami
 10   from huggingface_hub import ModelCard
 11
 12 + from gradio_huggingfacehub_search import HuggingfaceHubSearch
 13 +
 14   from textwrap import dedent
 15
 16   LLAMA_LIKE_ARCHS = ["MistralForCausalLM", "LlamaForCausalLM"]
(new side of diff hunk 2 — "+" marks added lines)
 143   iface = gr.Interface(
 144       fn=process_model,
 145       inputs=[
 146 +         HuggingfaceHubSearch(
 147               label="Hub Model ID",
 148 +             placeholder="Search for model id on Huggingface",
 149 +             search_type="model",
 150           ),
 151           gr.Dropdown(
 152               ["Q2_K", "Q3_K_S", "Q3_K_M", "Q3_K_L", "Q4_0", "Q4_K_S", "Q4_K_M", "Q5_0", "Q5_K_S", "Q5_K_M", "Q6_K", "Q8_0"],
(new side of diff hunk 3 — "+" marks the added line)
 177   )
 178
 179   # Launch the interface
 180 + iface.queue(default_concurrency_limit=1, max_size=5).launch(debug=True)