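"""Gradio demo: estimate the nutritional content of a meal from a photo.

The app encodes the uploaded image as a base64 data URL, sends it to an
OpenAI-compatible chat-completions endpoint serving LLaVA-OneVision-1.5, and
streams the reply back into a multimodal ChatInterface.

Configuration is read from the environment (the variable names match the ones
used below; the script name in the example invocation is only illustrative):

    BASE_URL=<endpoint-url> API_KEY=<token> python app.py
"""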
import base64
import mimetypes
import os
from pathlib import Path
from typing import Any, Dict, List

import gradio as gr
import requests
from openai import OpenAI
# Endpoint and credentials (a Hugging Face Inference endpoint or your own)
BASE_URL = os.getenv("BASE_URL", "")
API_KEY = os.getenv("API_KEY", "")

# Optional connectivity check against the raw HTTP endpoint (runs at import time)
if BASE_URL and API_KEY:
    headers = {"Authorization": f"Bearer {API_KEY}"}
    payload = {"inputs": "Describe this image", "parameters": {}}
    res = requests.post(BASE_URL, headers=headers, json=payload, timeout=30)
    print(res.json())

# Default model
DEFAULT_MODEL = "LLaVA-OneVision-1.5-8B-Instruct"

# OpenAI-compatible client (uses the Hugging Face endpoint or your own)
_client = OpenAI(
    base_url=BASE_URL,
    api_key=API_KEY,
)


def _data_url(path: str) -> str:
    """Encode a local image file as a base64 data URL."""
    mime, _ = mimetypes.guess_type(path)
    mime = mime or "application/octet-stream"
    data = base64.b64encode(Path(path).read_bytes()).decode("utf-8")
    return f"data:{mime};base64,{data}"


def _image_content(path: str) -> Dict[str, Any]:
    """Wrap an image path as an OpenAI-style image_url content part."""
    return {"type": "image_url", "image_url": {"url": _data_url(path)}}


def _text_content(text: str) -> Dict[str, Any]:
    return {"type": "text", "text": text}


def _message(role: str, content: Any) -> Dict[str, Any]:
    return {"role": role, "content": content}


def _build_user_message(message: Dict[str, Any]) -> Dict[str, Any]:
    """Turn a Gradio multimodal input (text + image files) into a chat message."""
    files = message.get("files") or []
    text = (message.get("text") or "").strip()
    # If the user sent no text, fall back to a default nutrition prompt
    if not text:
        text = (
            "Analiza la imagen del plato de comida y describe los alimentos que contiene. "
            "Indica una estimación de calorías, proteínas, carbohidratos y grasas. "
            "Responde en formato breve y estructurado."
        )
    content: List[Dict[str, Any]] = [_image_content(p) for p in files]
    if text:
        content.append(_text_content(text))
    return _message("user", content)


def _convert_history(history: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Convert Gradio "messages"-style history into OpenAI chat messages.

    Consecutive user entries (text and image-path tuples) are accumulated and
    flushed as a single multimodal user message before each assistant reply.
    """
    msgs: List[Dict[str, Any]] = []
    user_content: List[Dict[str, Any]] = []
    for turn in history or []:
        role, content = turn.get("role"), turn.get("content")
        if role == "user":
            if isinstance(content, str):
                user_content.append(_text_content(content))
            elif isinstance(content, tuple):
                user_content.extend(_image_content(path) for path in content if path)
        elif role == "assistant":
            msgs.append(_message("user", user_content.copy()))
            user_content.clear()
            msgs.append(_message("assistant", content))
    return msgs


def stream_response(message: Dict[str, Any], history: List[Dict[str, Any]], model_name: str = DEFAULT_MODEL):
    """Call the chat-completions endpoint and stream the reply back to the UI."""
    messages = _convert_history(history)
    messages.append(_build_user_message(message))
    try:
        stream = _client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=0.1,
            top_p=1,
            extra_body={
                "repetition_penalty": 1.05,
                "frequency_penalty": 0,
                "presence_penalty": 0,
            },
            stream=True,
        )
        partial = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                partial += delta
                yield partial
    except Exception as e:
        yield f"⚠️ Error al obtener respuesta: {e}"


def build_demo() -> gr.Blocks:
    chatbot = gr.Chatbot(type="messages", allow_tags=["think"])
    textbox = gr.MultimodalTextbox(
        show_label=False,
        placeholder="Subí una foto de tu comida para analizarla...",
        file_types=["image"],
        file_count="single",
        max_plain_text_length=32768,
    )
    model_selector = gr.Dropdown(
        label="Modelo",
        choices=[
            ("LLaVA-OneVision-1.5-8B-Instruct", "LLaVA-OneVision-1.5-8B-Instruct"),
            ("LLaVA-OneVision-1.5-4B-Instruct", "LLaVA-OneVision-1.5-4B-Instruct"),
        ],
        value=DEFAULT_MODEL,
    )
    return gr.ChatInterface(
        fn=stream_response,
        type="messages",
        multimodal=True,
        chatbot=chatbot,
        textbox=textbox,
        title="🍽️ NasFit Vision AI",
        description=(
            "Subí una foto de tu comida y NasFit IA estimará su contenido nutricional. "
            "Basado en **LLaVA-OneVision-1.5**, modelo multimodal open source con análisis visual avanzado. "
            "Ideal para tracking nutricional inteligente."
        ),
        additional_inputs=[model_selector],
        additional_inputs_accordion=gr.Accordion("Opciones avanzadas", open=False),
    ).queue(default_concurrency_limit=8)


def main():
    build_demo().launch()


if __name__ == "__main__":
    main()