Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-import streamlit as st
 import subprocess
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
 import black
@@ -9,17 +8,13 @@ import sys
 import torch
 from huggingface_hub import hf_hub_url, cached_download, HfApi, InferenceClient
 import base64
+import streamlit as st
 
-#
-
+# Use a publicly available model that doesn't require authentication
+rag_retriever = pipeline("retrieval-question-answering", model="distilbert-base-nq")
 
-
-load_dotenv()
+st.write("Pipeline created successfully")
 
-# Use the HUGGINGFACE_TOKEN in your code
-HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
-print(HUGGINGFACE_TOKEN)
-r
 # Add the new HTML code below
 custom_html = '''
 <div style='position:fixed;bottom:0;left:0;width:100%;'>
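Review note on the retriever swap above: "retrieval-question-answering" is not one of the standard transformers pipeline task names, and "distilbert-base-nq" has no namespace, so it is unlikely to resolve on the Hub; this new line is itself a plausible cause of the Space's runtime error. A minimal sketch of the same idea using a task name and checkpoint that do exist (extractive question answering over a supplied context); the question and context strings are illustrative only:

# Sketch, not the app's actual code: extractive QA with a real pipeline
# task ("question-answering") and a public SQuAD-distilled DistilBERT model.
from transformers import pipeline

qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

result = qa(
    question="Which chat model does the app load?",
    context="The app loads microsoft/DialoGPT-medium as its chat model.",
)
print(result["answer"], result["score"])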
@@ -73,7 +68,7 @@ AVAILABLE_CODE_GENERATIVE_MODELS = [
 ]
 
 # Load pre-trained RAG retriever
-rag_retriever = RagRetriever.from_pretrained("facebook/rag-token-base") # Use a Hugging Face RAG model
+# rag_retriever = RagRetriever.from_pretrained("facebook/rag-token-base") # Use a Hugging Face RAG model
 
 # Load pre-trained chat model
 chat_model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium") # Use a Hugging Face chat model
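Note on the line commented out above: RagRetriever.from_pretrained pulls a retrieval index (the wiki_dpr dataset by default) in addition to the model weights, a multi-gigabyte download that commonly fails or times out on a Space; that, rather than authentication, is presumably why it was disabled. If RAG is wanted later, the transformers API has a dummy-index mode for smoke testing (requires the datasets and faiss-cpu packages); a sketch:

# Sketch: load the RAG retriever against a tiny dummy index instead of
# downloading the full wiki_dpr dataset; for wiring/smoke tests only.
from transformers import RagRetriever

rag_retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-base",
    index_name="exact",      # in-memory exact index
    use_dummy_dataset=True,  # skip the full wiki_dpr download
)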
@@ -244,7 +239,7 @@ def chat_interface_with_agent(input_text, agent_name):
     input_ids = input_ids[:, :max_input_length]
 
     outputs = model.generate(
-        input_ids, max_new_tokens=
+        input_ids, max_new_tokens=1000, num_return_sequences=1, do_sample=True,
         pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
     )
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
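On the generate() change above: the old line ended at max_new_tokens= with no value, a syntax error that would crash the app as soon as the module was parsed, so this hunk is likely the real runtime-error fix. Setting pad_token_id=tokenizer.eos_token_id is the standard workaround for GPT-2-family tokenizers, which define no pad token. A self-contained sketch of the call pattern, assuming the microsoft/DialoGPT-medium checkpoint named in this diff (DialoGPT is a causal LM, so AutoModelForCausalLM is the usual loader; the AutoModelForSeq2SeqLM used earlier in the file may itself fail):

# Sketch: generation with sampling and pad_token_id set to the EOS id,
# mirroring the arguments added in this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

input_ids = tokenizer.encode("Hello there!" + tokenizer.eos_token, return_tensors="pt")
outputs = model.generate(
    input_ids,
    max_new_tokens=1000,
    num_return_sequences=1,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 tokenizers have no pad token
)
# Decode only the newly generated tokens, not the prompt.
response = tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True)
print(response)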