Update app.py
app.py CHANGED
@@ -13,7 +13,7 @@ import gradio as gr
 import random
 import prompts
 client = InferenceClient(
-    "mistralai/Mixtral-8x7B-
+    "mistralai/Mixtral-8x7B-v0.1"
 )
 
 def format_prompt(message, history):
@@ -46,7 +46,7 @@ rag_retriever = pipeline("text-generation", model="mistralai/Mixtral-8x7B-v0.1")
 chat_model = AutoModelForSeq2SeqLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
 
 # Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("gpt2")
 
 def process_input(user_input: str) -> str:
     # Input pipeline: Tokenize and preprocess user input