Commit: "checks" (Gregor Betz)

Files changed:
- app.py: +3 -3
- config.yaml: +1 -1
app.py CHANGED

@@ -213,17 +213,17 @@ with gr.Blocks() as demo:
 
     # set up client and guide
     if not client_kwargs["inference_server_url"]:
-        gr.Error(
+        raise gr.Error(
            "Please set the client model inference endpoint in the config.yaml file.",
            duration=-1
        )
     if not guide_kwargs["inference_server_url"]:
-        gr.Error(
+        raise gr.Error(
            "Please set the expert model inference endpoint in the config.yaml file.",
            duration=-1
        )
     if not guide_kwargs["classifier_kwargs"]["inference_server_url"]:
-        gr.Error(
+        raise gr.Error(
            "Please set the classifier model inference endpoint in the config.yaml file.",
            duration=-1
        )
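Why the fix matters: gr.Error is an exception class in Gradio, so a bare gr.Error(...) call only constructs the object and discards it; none of these endpoint checks previously did anything. Adding raise makes each check actually halt execution with the intended message. A minimal sketch of the corrected pattern (check_endpoint and the Interface wiring are illustrative, not taken from this app):

import gradio as gr

def check_endpoint(url: str) -> str:
    if not url:
        # A bare gr.Error(...) would be silently discarded; `raise` is what
        # surfaces it. duration=-1 mirrors the commit's usage.
        raise gr.Error(
            "Please set the inference endpoint in the config.yaml file.",
            duration=-1
        )
    return f"Using endpoint: {url}"

demo = gr.Interface(fn=check_endpoint, inputs="text", outputs="text")

When raised inside an event handler, Gradio catches the exception and shows the message to the user; raised at setup time, as in this commit, it aborts the launch instead of letting the app start with unset endpoints.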
config.yaml CHANGED

@@ -8,5 +8,5 @@ expert_llm:
   model_id: "meta-llama/Meta-Llama-3.1-70B-Instruct"
 classifier_llm:
   model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
-  url: "" # <-- start your own inference endpoint of classifier model
+  url: "" # <-- start your own inference endpoint of classifier model and provide url here
   batch_size: 8
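The config change only sharpens the comment, but this url is the value the startup checks above read. A hedged sketch of how it plausibly reaches app.py (the YAML key names follow the excerpt above; the loading code itself is an assumption, not a verbatim excerpt):

import yaml

# Load endpoint settings; key names match the config.yaml excerpt above.
with open("config.yaml") as f:
    config = yaml.safe_load(f)

classifier_url = config["classifier_llm"]["url"]
if not classifier_url:
    # Corresponds to the `raise gr.Error(...)` startup check in app.py.
    raise ValueError(
        "Please set the classifier model inference endpoint in the config.yaml file."
    )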