# Hugging Face Space chatbot app (page status banner said "Sleeping" when copied)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hub repo id of the uploaded chatbot model (placeholder — replace with your own).
MODEL_ID = "tu_usuario/mi-ia-chatbot"

# Load the tokenizer and causal-LM weights once at startup.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
def responder(mensaje, historial):
    """Generate a reply to *mensaje* and append the exchange to *historial*.

    Args:
        mensaje: The user's message (plain text).
        historial: List of (user, bot) tuples accumulated so far, or None
            on the first turn.

    Returns:
        A pair (historial, historial) — the same updated list twice, because
        the Gradio wiring routes the value to two outputs.
    """
    if historial is None:
        historial = []
    # Append EOS so the model sees a complete turn, as in DialoGPT-style training.
    entradas = tokenizer.encode(mensaje + tokenizer.eos_token, return_tensors="pt")
    # Inference only — no autograd graph needed.
    with torch.no_grad():
        salida = model.generate(
            entradas,
            max_length=150,
            pad_token_id=tokenizer.eos_token_id,
            # BUG FIX: without do_sample=True, generate() uses greedy decoding
            # and silently ignores temperature/top_p.
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
        )
    # Decode only the newly generated tokens (strip the echoed prompt).
    respuesta = tokenizer.decode(
        salida[:, entradas.shape[-1]:][0], skip_special_tokens=True
    )
    historial.append((mensaje, respuesta))
    return historial, historial
# Build the chat UI: a history panel plus a textbox; submitting the textbox
# runs `responder`, which feeds the updated history back into the chatbot.
with gr.Blocks() as demo:
    chat = gr.Chatbot()
    entrada = gr.Textbox()
    entrada.submit(responder, [entrada, chat], [chat, chat])

demo.launch()