雷娃 committed on
Commit f308a75 · 1 Parent(s): 7de1f3b

remove huggingface client

Files changed (1)
  1. app.py +2 -5
app.py CHANGED

@@ -3,8 +3,6 @@ from threading import Thread
 import gradio as gr
 import re
 import torch
-from huggingface_hub import InferenceClient
-
 
 # load model and tokenizer
 model_name = "inclusionAI/Ling-mini-2.0"
@@ -22,13 +20,12 @@ def respond(
     system_message,
     max_tokens,
     temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
+    top_p
 ):
     """
     For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
     """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
+    #client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
 
     messages = [{"role": "system", "content": system_message}]
 
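For context, after this commit the Space no longer calls the hosted InferenceClient and presumably generates text locally from the already-loaded model. The following is a minimal sketch of what respond() could look like with transformers-based local streaming generation; it is an assumption inferred from the imports visible in the diff (threading.Thread, torch, gradio) and the model_name "inclusionAI/Ling-mini-2.0". Everything beyond the names shown in the diff (respond, model_name, messages, the sampling parameters) is illustrative, not the Space's actual code.

# Illustrative sketch only; assumes local generation via transformers with a
# TextIteratorStreamer, which matches the threading/torch imports in the diff.
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "inclusionAI/Ling-mini-2.0"  # taken from the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,  # assumption: half-precision weights
    device_map="auto",
)

def respond(message, history, system_message, max_tokens, temperature, top_p):
    # Build OpenAI-style message dicts, as the diff's `messages` line suggests.
    messages = [{"role": "system", "content": system_message}]
    for user_msg, bot_msg in history:  # assumes tuple-style Gradio history
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Tokenize with the model's chat template and stream tokens back to Gradio.
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
    )
    # Run generation in a background thread so tokens can be yielded as they arrive.
    Thread(target=model.generate, kwargs=generation_kwargs).start()

    partial = ""
    for token_text in streamer:
        partial += token_text
        yield partial

A generator like this can be passed directly to gr.ChatInterface with the extra sliders/textboxes as additional_inputs, which is consistent with the (system_message, max_tokens, temperature, top_p) signature left in place by this commit.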