Spaces: Running on Zero
jedick committed
Commit 9d76733 · 1 Parent(s): 193aa8d
Revert model downloading
app.py CHANGED

@@ -4,7 +4,7 @@ from graph import BuildGraph
 from retriever import db_dir
 from langgraph.checkpoint.memory import MemorySaver
 from dotenv import load_dotenv
-from main import openai_model, model_id, DownloadChatModel
+from main import openai_model, model_id
 from util import get_sources, get_start_end_months
 from mods.tool_calling_llm import extract_think
 import requests
@@ -82,6 +82,7 @@ def run_workflow(input, history, compute_mode, thread_id, session_hash):
     if compute_mode == "local":
         gr.Info(
             f"Please wait for the local model to load",
+            duration=15,
             title=f"Model loading...",
         )
     # Get the chat model and build the graph
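For context, the added `duration=15` keeps the Gradio toast on screen for about 15 seconds while the local model loads. A minimal sketch of the resulting call; `gr.Info` accepts `duration` and `title` in recent Gradio releases, and the wrapper function here is only for illustration:

```python
import gradio as gr

def notify_local_model_loading(compute_mode: str) -> None:
    # Show a transient notification while the local model loads;
    # duration=15 keeps it visible for ~15 seconds instead of the default.
    if compute_mode == "local":
        gr.Info(
            "Please wait for the local model to load",
            duration=15,
            title="Model loading...",
        )
```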
@@ -210,11 +211,6 @@ def to_workflow(request: gr.Request, *args):
     # Add session_hash to arguments
     new_args = args + (request.session_hash,)
     if compute_mode == "local":
-        # If graph hasn't been instantiated, download model before running workflow
-        graph = graph_instances[compute_mode].get(request.session_hash)
-        if graph is None:
-            gr.Info("Downloading model, please wait", title="Downloading model...")
-            DownloadChatModel()
         # Call the workflow function with the @spaces.GPU decorator
         for value in run_workflow_local(*new_args):
             yield value
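The retained comment refers to ZeroGPU's `@spaces.GPU` decorator, which attaches a GPU only while the decorated call runs. A minimal sketch of that wiring, assuming `run_workflow_local` simply forwards to the shared `run_workflow` generator (the stub below stands in for the real function in app.py):

```python
import spaces

def run_workflow(*args):
    # Stand-in for the real run_workflow() generator defined in app.py
    yield from ("chunk 1", "chunk 2")

@spaces.GPU
def run_workflow_local(*args):
    # ZeroGPU allocates a GPU only while this decorated call runs; after
    # this commit there is no pre-download, so model weights are fetched
    # on the first model load inside the call.
    yield from run_workflow(*args)
```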
@@ -658,9 +654,9 @@ with gr.Blocks(
     # For S3 (need AWS_ACCESS_KEY_ID and AWS_ACCESS_KEY_SECRET)
     download_file_from_bucket("r-help-chat", "db.zip", "db.zip")
     ## For Dropbox (shared file - key is in URL)
-    #shared_link = "https://www.dropbox.com/scl/fi/jx90g5lorpgkkyyzeurtc/db.zip?rlkey=wvqa3p9hdy4rmod1r8yf2am09&st=l9tsam56&dl=0"
-    #output_filename = "db.zip"
-    #download_dropbox_file(shared_link, output_filename)
+    # shared_link = "https://www.dropbox.com/scl/fi/jx90g5lorpgkkyyzeurtc/db.zip?rlkey=wvqa3p9hdy4rmod1r8yf2am09&st=l9tsam56&dl=0"
+    # output_filename = "db.zip"
+    # download_dropbox_file(shared_link, output_filename)
 
     return None
 
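`download_file_from_bucket` and `download_dropbox_file` are project helpers rather than library calls. A minimal sketch of what the S3 path typically looks like with boto3, assuming the helper takes (bucket, key, filename) in the order used above and that credentials come from the environment:

```python
import boto3

def download_file_from_bucket(bucket: str, key: str, filename: str) -> None:
    # boto3 reads AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from the
    # environment; the comment in app.py calls the latter AWS_ACCESS_KEY_SECRET.
    s3 = boto3.client("s3")
    s3.download_file(bucket, key, filename)

# Matches the call in app.py:
# download_file_from_bucket("r-help-chat", "db.zip", "db.zip")
```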
main.py CHANGED

@@ -5,7 +5,6 @@ from langchain_core.output_parsers import StrOutputParser
 from langgraph.checkpoint.memory import MemorySaver
 from langchain_core.messages import ToolMessage
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
-from huggingface_hub import snapshot_download
 from datetime import datetime
 from dotenv import load_dotenv
 import os
@@ -129,16 +128,6 @@ def ProcessDirectory(path, compute_mode):
         print(f"Chroma: no change for {file_path}")
 
 
-def DownloadChatModel():
-    """
-    Downloads a chat model to a local directory.
-    """
-    # Local directory is "./<repo_name>"
-    repo_name = model_id.split("/")[-1]
-    local_dir = f"./{repo_name}"
-    snapshot_download(model_id, local_dir=local_dir)
-
-
 def GetChatModel(compute_mode):
     """
     Get a chat model.
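With `DownloadChatModel()` removed, nothing pre-fetches the weights into `./<repo_name>`; `from_pretrained(model_id)` now downloads them to the Hugging Face cache on first use. If an explicit pre-download were wanted again, a minimal sketch using the standard cache instead of a local directory (the function name is illustrative):

```python
from huggingface_hub import snapshot_download

def prefetch_chat_model(model_id: str) -> str:
    # Download the model snapshot into the default Hugging Face cache
    # (reused on later calls) and return its local path.
    return snapshot_download(model_id)
```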
@@ -157,20 +146,11 @@ def GetChatModel(compute_mode):
     if compute_mode == "local" and not torch.cuda.is_available():
         raise Exception("Local chat model selected without GPU")
 
-    # Use local directory for model if it exists
-    repo_name = model_id.split("/")[-1]
-    local_dir = f"./{repo_name}"
-    if os.path.isdir(local_dir):
-        print("Using local directory for model")
-        id_or_dir = local_dir
-    else:
-        id_or_dir = model_id
-
     # Define the pipeline to pass to the HuggingFacePipeline class
     # https://huggingface.co/blog/langchain
-    tokenizer = AutoTokenizer.from_pretrained(id_or_dir)
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(
-        id_or_dir,
+        model_id,
         # We need this to load the model in BF16 instead of fp32 (torch.float)
         torch_dtype=torch.bfloat16,
     )
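The hunk above restores loading directly from `model_id`. For context, a minimal sketch of how such a tokenizer/model pair is typically wrapped for LangChain via `HuggingFacePipeline` (following the blog post linked in the code); the model name and generation settings below are placeholders, not the Space's actual configuration:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from langchain_huggingface import HuggingFacePipeline

model_id = "Qwen/Qwen2.5-7B-Instruct"  # placeholder; main.py defines the real model_id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    # Load the weights in bfloat16 rather than the default float32
    torch_dtype=torch.bfloat16,
)
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,  # placeholder generation setting
)
chat_model = HuggingFacePipeline(pipeline=pipe)
```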