Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,70 +1,92 @@
 import gradio as gr
-from huggingface_hub import InferenceClient
-
-
-def respond(
-    message,
-    history: list[dict[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-    hf_token: gr.OAuthToken,
-):
-    """
-    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-    """
-    client = InferenceClient(token=hf_token.token, model="openai/gpt-oss-20b")
-
-    messages = [{"role": "system", "content": system_message}]
-
-    messages.extend(history)
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        choices = message.choices
-        token = ""
-        if len(choices) and choices[0].delta.content:
-            token = choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-chatbot = gr.ChatInterface(
-    respond,
-    type="messages",
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-with gr.Blocks() as demo:
-    with gr.Sidebar():
-        gr.LoginButton()
-    chatbot.render()
-
-
-if __name__ == "__main__":
-    demo.launch()
+import torch
+import time
+from transformers import T5Tokenizer, T5ForConditionalGeneration
+from nltk.tokenize import sent_tokenize
+
+class DipperParaphraser(object):
+    def __init__(self, model="kalpeshk2011/dipper-paraphraser-xxl", verbose=True):
+        time1 = time.time()
+        self.tokenizer = T5Tokenizer.from_pretrained('google/t5-v1_1-xxl')
+        self.model = T5ForConditionalGeneration.from_pretrained(model)
+        if verbose:
+            print(f"{model} model loaded in {time.time() - time1}")
+        self.model.cuda()
+        self.model.eval()
+
+    def paraphrase(self, input_text, lex_diversity, order_diversity, prefix="", sent_interval=3, **kwargs):
+        assert lex_diversity in [0, 20, 40, 60, 80, 100], "Lexical diversity must be one of 0, 20, 40, 60, 80, 100."
+        assert order_diversity in [0, 20, 40, 60, 80, 100], "Order diversity must be one of 0, 20, 40, 60, 80, 100."
+
+        lex_code = int(100 - lex_diversity)
+        order_code = int(100 - order_diversity)
+
+        input_text = " ".join(input_text.split())
+        sentences = sent_tokenize(input_text)
+        prefix = " ".join(prefix.replace("\n", " ").split())
+        output_text = ""
+
+        for sent_idx in range(0, len(sentences), sent_interval):
+            curr_sent_window = " ".join(sentences[sent_idx:sent_idx + sent_interval])
+            final_input_text = f"lexical = {lex_code}, order = {order_code}"
+            if prefix:
+                final_input_text += f" {prefix}"
+            final_input_text += f" <sent> {curr_sent_window} </sent>"
+
+            final_input = self.tokenizer([final_input_text], return_tensors="pt")
+            final_input = {k: v.cuda() for k, v in final_input.items()}
+
+            with torch.inference_mode():
+                outputs = self.model.generate(**final_input, **kwargs)
+            outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
+            prefix += " " + outputs[0]
+            output_text += " " + outputs[0]
+
+        return output_text.strip()
+
+dp = None
+
+def paraphrase_interface(prompt, input_text, lex_diversity, order_diversity, sent_interval, top_p, top_k, max_length, do_sample):
+    global dp
+    if dp is None:
+        dp = DipperParaphraser(verbose=False)
+    kwargs = {
+        "do_sample": do_sample,
+        "top_p": top_p,
+        "top_k": top_k if top_k else None,
+        "max_length": max_length,
+    }
+    return dp.paraphrase(
+        input_text,
+        lex_diversity=lex_diversity,
+        order_diversity=order_diversity,
+        prefix=prompt,
+        sent_interval=sent_interval,
+        **kwargs
+    )
+
+with gr.Blocks() as demo:
+    gr.Markdown("# DIPPER Paraphraser XXL")
+    with gr.Row():
+        with gr.Column():
+            prompt = gr.Textbox(label="Prompt (Optional)", value="In a shocking finding, scientist discovered a herd of unicorns living in a remote valley.")
+            input_text = gr.Textbox(label="Text to Paraphrase", lines=8, value="They have never been known to mingle with humans. Today, it is believed these unicorns live in an unspoilt environment which is surrounded by mountains. Its edge is protected by a thick wattle of wattle trees, giving it a majestic appearance. Along with their so-called miracle of multicolored coat, their golden coloured feather makes them look like mirages. Some of them are rumored to be capable of speaking a large amount of different languages. They feed on elk and goats as they were selected from those animals that possess a fierceness to them, and can \"eat\" them with their long horns.")
+            lex_diversity = gr.Dropdown(label="Lexical Diversity", choices=[0, 20, 40, 60, 80, 100], value=60)
+            order_diversity = gr.Dropdown(label="Order Diversity", choices=[0, 20, 40, 60, 80, 100], value=0)
+            sent_interval = gr.Number(label="Sentence Interval", value=3, precision=0)
+            top_p = gr.Number(label="Top P (sampling)", value=0.75)
+            top_k = gr.Number(label="Top K (sampling, None for default)", value=None, precision=0)
+            max_length = gr.Number(label="Max Length", value=512, precision=0)
+            do_sample = gr.Checkbox(label="Enable Sampling", value=True)
+            btn = gr.Button("Paraphrase")
+        with gr.Column():
+            output = gr.Textbox(label="Paraphrased Output", lines=8)
+
+    btn.click(
+        paraphrase_interface,
+        inputs=[prompt, input_text, lex_diversity, order_diversity, sent_interval, top_p, top_k, max_length, do_sample],
+        outputs=output
+    )
+
+if __name__ == "__main__":
+    demo.launch()
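
For reference, the committed DipperParaphraser can be exercised outside the Gradio UI roughly as below. This is a minimal sketch, not part of the commit: it assumes a CUDA device is available (__init__ calls self.model.cuda() unconditionally) and that NLTK's punkt tokenizer data has been downloaded (sent_tokenize raises a LookupError otherwise); either missing piece would be a plausible cause of the "Runtime error" badge on a CPU Space. The `from app import DipperParaphraser` path is hypothetical.

# Minimal usage sketch; assumes a CUDA GPU and NLTK punkt data.
import nltk
nltk.download("punkt")  # required by nltk.tokenize.sent_tokenize

from app import DipperParaphraser  # hypothetical import of the class above

dp = DipperParaphraser(verbose=True)  # loads the t5-v1_1-xxl tokenizer and DIPPER weights
result = dp.paraphrase(
    "They have never been known to mingle with humans.",
    lex_diversity=60,     # inverted internally to control code lex = 100 - 60 = 40
    order_diversity=0,    # inverted internally to control code order = 100
    prefix="In a shocking finding, scientist discovered a herd of unicorns.",
    sent_interval=3,      # sentences paraphrased per generate() call
    do_sample=True,       # forwarded to model.generate via **kwargs
    top_p=0.75,
    max_length=512,
)
print(result)

Note that the diversity knobs are inverted into control codes inside paraphrase, so higher values in the UI correspond to lower codes in the model's input prefix.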