Ramaravind commited on
Commit
b5f56f6
·
verified ·
1 Parent(s): 95a0410

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +76 -3
app.py CHANGED
@@ -1,7 +1,80 @@
1
- from transformers import pipeline
 
 
 
 
 
 
 
2
  import gradio as gr
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
 
4
- pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")
 
 
 
 
 
5
 
6
- demo = gr.Interface.from_pipeline(pipe)
7
  demo.launch()
 
1
# Lightweight LLM demo: a Gradio front-end over a Hugging Face
# text2text-generation pipeline.
import gradio as gr
from transformers import pipeline
import torch

# Model to serve. flan-t5-small is small enough to run comfortably on CPU.
MODEL_NAME = "google/flan-t5-small"

# Build the inference pipeline once at import time; it handles both
# tokenization and generation. Prefer a GPU when one is available,
# otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = pipeline("text2text-generation", model=MODEL_NAME, device=device)
22
def generate_text(user_input, prompt_template):
    """Build the full prompt from the template and run it through the pipeline.

    Args:
        user_input: Raw text typed by the user.
        prompt_template: A format string containing a ``{user_input}``
            placeholder (selected from the dropdown in the UI).

    Returns:
        The generated text, or a human-readable ``"Error: ..."`` message —
        the demo surfaces failures in the output box rather than raising.
    """
    # Guard against empty/whitespace-only input instead of wasting a model
    # call on a degenerate prompt such as "Summarize this: ".
    if not user_input or not user_input.strip():
        return "Error: please enter some input text."

    # Substitute the user's text into the chosen template.
    full_prompt = prompt_template.format(user_input=user_input)

    try:
        # The pipeline returns a list of dicts; take the generated text of
        # the first (and only) candidate.
        response = pipe(full_prompt, max_new_tokens=100)
        return response[0]['generated_text']
    except Exception as e:
        # Broad catch is deliberate: any inference failure is shown to the
        # user in the output textbox instead of crashing the app.
        return f"Error: {e}"
38
# Gradio UI: an input column (text box, template picker, button) next to a
# wider output column, wired to generate_text on button click.
with gr.Blocks() as demo:
    gr.Markdown("# Lightweight LLM Demo")
    gr.Markdown("Enter text and select a prompt to generate an AI response.")

    with gr.Row():
        with gr.Column(scale=1):
            # Where the user types the text to transform.
            input_box = gr.Textbox(
                label="Your Input Text",
                placeholder="Type here...",
                lines=5,
            )

            # Pre-baked prompt templates; each carries a {user_input} slot.
            template_choice = gr.Dropdown(
                label="Choose a Prompt Template",
                choices=[
                    "Summarize this: {user_input}",
                    "Answer the following question: {user_input}",
                    "Rewrite this text to be more formal: {user_input}"
                ],
                value="Summarize this: {user_input}",
            )

            # Kicks off generation.
            run_button = gr.Button("Generate")

        with gr.Column(scale=2):
            # Displays the model's reply.
            result_box = gr.Textbox(
                label="Generated Output",
                lines=10,
            )

    # Route the click through generate_text: both inputs go in, the
    # generated (or error) text comes back out.
    run_button.click(
        fn=generate_text,
        inputs=[input_box, template_choice],
        outputs=result_box,
    )

demo.launch()