# Legacy demo endpoint, kept commented out for reference:
#
# from fastapi import FastAPI
# from pydantic import BaseModel
# from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList
# import torch
#
# app = FastAPI()
#
# model_id = "HuggingFaceTB/SmolLM2-360M"
# tokenizer = AutoTokenizer.from_pretrained(model_id)
# model = AutoModelForCausalLM.from_pretrained(model_id)
#
# class ChatRequest(BaseModel):
#     context: str  # Conversation history, as plain text
#
# class NewlineStoppingCriteria(StoppingCriteria):
#     """Stops generation as soon as a newline appears after the prompt."""
#     def __init__(self, prompt_len, tokenizer):
#         super().__init__()
#         self.prompt_len = prompt_len
#         self.tokenizer = tokenizer
#
#     def __call__(self, input_ids, scores, **kwargs):
#         # Check whether a newline has been generated after the prompt
#         gen_tokens = input_ids[0][self.prompt_len:]
#         gen_text = self.tokenizer.decode(gen_tokens, skip_special_tokens=True)
#         return '\n' in gen_text
#
# @app.post("/chat/demo_base")
# async def chat_demo_base(request: ChatRequest):
#     # Few-shot prompt: two example conversations, then the user's context
#     prompt = (
#         "Conversacion 1:\n"
#         "-Dauro: -Hola Juanjo.\n"
#         "-Juanjo: -¿Qué tal?\n"
#         "-Dauro: -Bien, ¿y tú?\n\n"
#         "Conversacion 2:\n"
#         "-Juanjo: -Oye Asistente, ¿puedes mirar esto?\n"
#         "-Asistente: -Por supuesto, dime.\n\n"
#         f"Conversacion 3:\n{request.context}\n"
#     )
#     inputs = tokenizer(prompt, return_tensors="pt")
#     input_ids = inputs["input_ids"]
#     attention_mask = inputs["attention_mask"]
#     stopping_criteria = StoppingCriteriaList([
#         NewlineStoppingCriteria(prompt_len=input_ids.shape[1], tokenizer=tokenizer)
#     ])
#     output = model.generate(
#         input_ids=input_ids,
#         attention_mask=attention_mask,
#         max_new_tokens=15,
#         temperature=0.9,
#         top_p=0.8,
#         do_sample=True,
#         pad_token_id=tokenizer.eos_token_id,  # tokenizers always expose eos_token_id (it may be None)
#         stopping_criteria=stopping_criteria,
#     )
#     generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
#     # Keep only the fragment generated after the prompt, up to the first newline
#     continuation = generated_text[len(prompt):].split('\n')[0]
#     return {"generated_text": continuation}
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List, Optional

app = FastAPI()

# Temporary in-memory storage (a single slot, overwritten on each POST)
registro_actual = {}

# Input model
class DialogoEntrada(BaseModel):
    enunciado: str
    personajes: List[str]  # list of 3 characters
    relato_inicial: str
    final_1: str
    final_2: str
    final_3: str

# Output model
class DialogoSalida(BaseModel):
    enunciado: str
    personajes: List[str]
    relato_inicial: str
    final_1: str
    final_2: str
    final_3: str

# NOTE: the original handlers had no route decorators, so the app exposed no
# endpoints; the /dialogo paths below are assumptions added to make it servable.
@app.post("/dialogo")
async def registrar_dialogo(dialogo: DialogoEntrada):
    global registro_actual
    registro_actual = dialogo.dict()  # Overwrites the previous content
    return {"status": "registro guardado"}

@app.get("/dialogo", response_model=Optional[DialogoSalida])
async def obtener_y_limpiar():
    global registro_actual
    if not registro_actual:
        return None
    salida = registro_actual
    registro_actual = {}  # Clear after returning
    return salida
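# A minimal local-run and usage sketch; the port and the /dialogo paths above
# are assumptions, not part of the original Space configuration.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

# Example client calls (hypothetical payload, for illustration only):
#
#   curl -X POST http://localhost:7860/dialogo \
#        -H "Content-Type: application/json" \
#        -d '{"enunciado": "...", "personajes": ["A", "B", "C"],
#             "relato_inicial": "...", "final_1": "...",
#             "final_2": "...", "final_3": "..."}'
#
#   curl http://localhost:7860/dialogo   # returns the stored dialogue and clears the slot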