Update app.py
app.py CHANGED
@@ -2,7 +2,6 @@
 #app.py_144
 #Granite 3b instruct only
 
-
 import os
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
@@ -11,14 +10,6 @@ from pptx.util import Inches, Pt
 import torch
 import time
 
-#TEXT_MODELS = {
-#    "Utter-Project_EuroLLM-1.7B": "utter-project/EuroLLM-1.7B",
-#    "Mistral Nemo 2407 (GGUF)": "MisterAI/Bartowski_MistralAI_Mistral-Nemo-Instruct-2407-IQ4_XS.gguf",
-##    "Mixtral 8x7B": "mistralai/Mixtral-8x7B-v0.1",
-#    "Lucie 7B": "OpenLLM-France/Lucie-7B"
-#}
-
-
 # Single-model configuration
 MODEL_PATH = "ibm-granite/granite-3.1-3b-a800m-Instruct"
 
@@ -79,16 +70,14 @@ class PresentationGenerator:
         self.tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
         self.model = AutoModelForCausalLM.from_pretrained(
             MODEL_PATH,
-            torch_dtype=torch.float32,
-            device_map="auto"
+            torch_dtype=torch.float32,
+            device_map="auto"
         )
-        self.model.eval()
+        self.model.eval()
         print("Modèle initialisé avec succès!")
 
     def generate_text(self, prompt, temperature=0.7, max_tokens=2048):
-        """Generates the presentation text in the Granite chat format"""
         try:
-            # Format the chat per the Granite instructions
             chat = [{"role": "user", "content": prompt}]
             formatted_prompt = self.tokenizer.apply_chat_template(
                 chat,
@@ -96,7 +85,6 @@ class PresentationGenerator:
                 add_generation_prompt=True
             )
 
-            # Tokenization and generation
             inputs = self.tokenizer(
                 formatted_prompt,
                 return_tensors="pt",
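These two hunks strip inline comments from a standard transformers chat-template flow: wrap the prompt as a single-turn chat, render it with the model's template, then tokenize and generate. A minimal self-contained sketch of that flow, assuming tokenize=False in apply_chat_template (the hunk cuts that argument off) and illustrative sampling settings:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_PATH = "ibm-granite/granite-3.1-3b-a800m-Instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float32,  # same dtype the app pins above
    device_map="auto",
)
model.eval()

# Single-turn chat wrapped the way the Granite template expects.
chat = [{"role": "user", "content": "Fais un plan de présentation sur les LLM."}]
formatted = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

inputs = tokenizer(formatted, return_tensors="pt").to(model.device)
with torch.no_grad():
    # Sampling values are placeholders; the app exposes temperature/max_tokens.
    output = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)

# Decode only the newly generated tokens, not the echoed prompt.
text = tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(text)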
@@ -120,7 +108,6 @@ class PresentationGenerator:
             raise
 
     def parse_presentation_content(self, content):
-        """Parses the generated content into slide sections"""
         slides = []
         current_slide = None
 
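The body of parse_presentation_content falls outside the hunk, so the markers it scans for are not visible here. Purely as a hypothetical illustration of this kind of section parser, with an invented "Titre:" delimiter and "-" bullets standing in for whatever format PREPROMPT actually requests:

def parse_presentation_content(content):
    # Hypothetical format: each slide opens with "Titre:" and lists "-" bullets.
    slides = []
    current_slide = None
    for line in content.split("\n"):
        line = line.strip()
        if line.startswith("Titre:"):  # assumed slide delimiter
            if current_slide:
                slides.append(current_slide)
            current_slide = {"title": line[len("Titre:"):].strip(), "points": []}
        elif line.startswith("-") and current_slide:
            current_slide["points"].append(line.lstrip("- ").strip())
    if current_slide:
        slides.append(current_slide)
    return slides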
@@ -143,14 +130,11 @@ class PresentationGenerator:
         return slides
 
     def create_presentation(self, slides):
-        """Builds the PowerPoint presentation"""
         prs = Presentation()
 
-        # First slide (title)
         title_slide = prs.slides.add_slide(prs.slide_layouts[0])
         title_slide.shapes.title.text = slides[0]['title']
 
-        # Remaining slides
         for slide in slides[1:]:
             content_slide = prs.slides.add_slide(prs.slide_layouts[1])
             content_slide.shapes.title.text = slide['title']
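create_presentation leans on python-pptx layout indices: in the default template, slide_layouts[0] is the title layout and slide_layouts[1] is title-plus-content. A self-contained sketch of the same pattern; the sample slide data and the placeholders[1] bullet handling are assumptions, since the hunk ends before the body-writing code:

from pptx import Presentation
from pptx.util import Pt

slides = [
    {"title": "Ma présentation", "points": []},                # title slide
    {"title": "Section 1", "points": ["Point A", "Point B"]},  # content slide
]

prs = Presentation()

# Layout 0: title slide.
title_slide = prs.slides.add_slide(prs.slide_layouts[0])
title_slide.shapes.title.text = slides[0]["title"]

# Layout 1: title + content; placeholder index 1 is the body text frame.
for slide in slides[1:]:
    content_slide = prs.slides.add_slide(prs.slide_layouts[1])
    content_slide.shapes.title.text = slide["title"]
    body = content_slide.placeholders[1].text_frame
    for i, point in enumerate(slide["points"]):
        para = body.paragraphs[0] if i == 0 else body.add_paragraph()
        para.text = point
        para.font.size = Pt(18)

prs.save("presentation.pptx")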
@@ -168,21 +152,19 @@ class PresentationGenerator:
 # Global timer for time tracking
 timer = ExecutionTimer()
 
-def
-    return timer.get_status()
-
-def generate_skeleton(text, temperature, max_tokens, status_output):
+def generate_skeleton(text, temperature, max_tokens):
     """Generates the presentation skeleton"""
     try:
         timer.start()
         generator = PresentationGenerator()
 
-        # Content generation
         full_prompt = PREPROMPT + "\n\n" + text
         generated_content = generator.generate_text(full_prompt, temperature, max_tokens)
 
+        status = timer.get_status()
         timer.stop()
-
+
+        return status, generated_content, gr.update(visible=True)
 
     except Exception as e:
         timer.stop()
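This is the functional core of the commit: the old code carried a dangling def stub around timer.get_status() and took status_output as an input, while the new generate_skeleton returns everything the UI needs. Gradio maps each element of the returned tuple onto the corresponding entry in the click event's outputs list, so returning gr.update(visible=True) in the slot bound to create_presentation_btn is what reveals the hidden button. A minimal sketch of that pattern, with illustrative component names:

import gradio as gr

def do_work(text):
    # Third value lands on the hidden button and flips it visible.
    return "Terminé", text.upper(), gr.update(visible=True)

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Entrée")
    status = gr.Textbox(label="Statut")
    out = gr.Textbox(label="Sortie")
    next_btn = gr.Button("Suite", visible=False)  # hidden until work is done
    run_btn = gr.Button("Lancer")
    run_btn.click(fn=do_work, inputs=inp, outputs=[status, out, next_btn])

demo.launch()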
@@ -196,7 +178,6 @@ def create_presentation_file(generated_content):
     timer.start()
     generator = PresentationGenerator()
 
-    # Create and save the presentation
     slides = generator.parse_presentation_content(generated_content)
    prs = generator.create_presentation(slides)
 
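create_presentation_file feeds the gr.File output declared below with type="filepath", so it only has to return the path of a saved .pptx; the save step itself sits outside the hunk. One way to produce such a path, sketched with tempfile (the naming scheme is an assumption):

import os
import tempfile
from pptx import Presentation

def save_presentation(prs: Presentation) -> str:
    # Write to a unique temporary path and hand that path to gr.File.
    fd, path = tempfile.mkstemp(suffix=".pptx", prefix="presentation_")
    os.close(fd)  # python-pptx reopens the path itself
    prs.save(path)
    return path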
@@ -254,34 +235,25 @@ with gr.Blocks(theme=gr.themes.Glass()) as demo:
             status_output = gr.Textbox(
                 label="Statut",
                 lines=2,
-
+                value="En attente..."
             )
             generated_content = gr.Textbox(
                 label="Contenu généré",
                 lines=10,
                 show_copy_button=True
             )
-            create_presentation_btn = gr.Button("Créer
+            create_presentation_btn = gr.Button("Créer Présentation", visible=False)
             output_file = gr.File(
                 label="Présentation PowerPoint",
-                type="filepath"
+                type="filepath"
             )
 
-    # Real-time status update
-    status_output.change(
-        fn=update_status,
-        inputs=[],
-        outputs=[status_output],
-        every=1
-    )
-
     generate_skeleton_btn.click(
         fn=generate_skeleton,
         inputs=[
             input_text,
             temperature,
-            max_tokens
-            status_output
+            max_tokens
         ],
         outputs=[
             status_output,
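The deleted status_output.change(..., every=1) block polled the component off its own change event, so every write re-triggered the callback, and its fn=update_status pointed at the broken stub removed in an earlier hunk. If a periodic status refresh were still wanted, attaching the poll to the Blocks load event avoids that feedback loop; a sketch, assuming a Gradio version whose load accepts every (which requires the queue):

import time
import gradio as gr

start = time.time()

def get_status():
    return f"Temps écoulé : {time.time() - start:.0f}s"

with gr.Blocks() as demo:
    status = gr.Textbox(label="Statut", value="En attente...")
    # Re-run get_status once per second while a client is connected.
    demo.load(fn=get_status, inputs=None, outputs=status, every=1)

demo.queue().launch()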
@@ -304,4 +276,3 @@ if __name__ == "__main__":
 
 
 
-