Merge branch 'main' of hf.co:spaces/SeaLLMs/SeaLLM-7B-v2.5
multipurpose_chatbot/demos/langchain_web_search.py
CHANGED
@@ -20,7 +20,7 @@ try:
 from langchain.agents import AgentExecutor, load_tools
 from langchain.agents.format_scratchpad import format_log_to_str
 from langchain.agents.output_parsers import (
-    ReActJsonSingleInputOutputParser
+    ReActJsonSingleInputOutputParser
 )
 from langchain.tools.render import render_text_description, render_text_description_and_args
 from langchain_community.utilities import SerpAPIWrapper
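For reference, ReActJsonSingleInputOutputParser is the piece that turns a ReAct agent's JSON-formatted "Action" output into tool invocations. A minimal sketch of how these imports typically wire together follows; the hub prompt name and the llm placeholder are illustrative assumptions, not code from this Space:

from langchain import hub
from langchain.agents import AgentExecutor, load_tools
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.agents.output_parsers import ReActJsonSingleInputOutputParser
from langchain.tools.render import render_text_description

llm = ...  # assumption: any LangChain-compatible LLM, e.g. a wrapper around SeaLLM-7B-v2.5
tools = load_tools(["serpapi"])  # requires SERPAPI_API_KEY in the environment

# Bind the tool descriptions into a standard ReAct-JSON prompt.
prompt = hub.pull("hwchase17/react-json")  # assumption: a common community prompt
prompt = prompt.partial(
    tools=render_text_description(tools),
    tool_names=", ".join(t.name for t in tools),
)

agent = (
    {
        "input": lambda x: x["input"],
        "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
    }
    | prompt
    | llm
    | ReActJsonSingleInputOutputParser()  # parses the JSON blob into AgentAction / AgentFinish
)
executor = AgentExecutor(agent=agent, tools=tools, verbose=True)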
multipurpose_chatbot/engines/transformers_engine.py
CHANGED
@@ -546,7 +546,7 @@ class TransformersEngine(BaseEngine):
         raise gr.Error(message_safety)
 
     # @maybe_spaces_gpu
-    def generate_yield_string(self, prompt, temperature, max_tokens, stop_strings: Optional[Tuple[str]] = None, **kwargs):
+    def generate_yield_string(self, prompt, temperature=0.7, max_tokens=1024, stop_strings: Optional[Tuple[str]] = None, **kwargs):
 
         # ! MUST PUT INSIDE torch.no_grad() otherwise it will overflow OOM
         import sys
@@ -564,7 +564,7 @@ class TransformersEngine(BaseEngine):
         inputs = self.tokenizer(prompt, return_tensors='pt')
         # whether to print the full prompts
         retok_full_prompt = self.tokenizer.decode(inputs.input_ids[0], skip_special_tokens=False)
-        print(f"retok_full_prompt:\n{retok_full_prompt}>>>>")
+        # print(f"retok_full_prompt:\n{retok_full_prompt}>>>>")
         begin_bos = inputs.input_ids[0][0] == self.tokenizer.bos_token_id
         print(f'begin_bos: {begin_bos}')
 
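The signature change gives temperature and max_tokens sensible defaults so callers can omit them, and the verbose prompt dump is commented out. As a rough illustration of a streaming generator of this shape (not the Space's actual implementation: the model id is taken from the repo name, and the threading/streamer scaffolding is an assumption), it usually looks like:

import torch
from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

model_name = "SeaLLMs/SeaLLM-7B-v2.5"  # assumption: inferred from the Space name
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, torch_dtype=torch.bfloat16, device_map="auto"
)

def generate_yield_string(prompt, temperature=0.7, max_tokens=1024):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    gen_kwargs = dict(
        **inputs,
        streamer=streamer,
        do_sample=temperature > 0,
        temperature=temperature,
        max_new_tokens=max_tokens,
    )

    def _worker():
        # grad mode is thread-local, so the no_grad guard belongs inside the worker
        with torch.no_grad():
            model.generate(**gen_kwargs)

    Thread(target=_worker).start()
    text = ""
    for chunk in streamer:
        text += chunk
        yield text  # yield the accumulated string, Gradio-streaming style

Usage: for partial in generate_yield_string("Hello!"): print(partial) prints ever-longer prefixes of the completion, which is the pattern Gradio chat callbacks expect.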