import os
import asyncio
from collections import defaultdict

from fastapi import FastAPI, HTTPException, Query
from fastapi.responses import StreamingResponse
from openai import AsyncOpenAI

app = FastAPI()

# Define available models
AVAILABLE_MODELS = {
    "openai/gpt-4.1": "OpenAI GPT-4.1",
    "openai/gpt-4.1-mini": "OpenAI GPT-4.1-mini",
    "openai/gpt-4.1-nano": "OpenAI GPT-4.1-nano",
    "openai/gpt-4o": "OpenAI GPT-4o",
    "openai/gpt-4o-mini": "OpenAI GPT-4o mini",
    "openai/o4-mini": "OpenAI o4-mini",
    "microsoft/MAI-DS-R1": "MAI-DS-R1",
    "microsoft/Phi-3.5-MoE-instruct": "Phi-3.5-MoE instruct (128k)",
    "microsoft/Phi-3.5-mini-instruct": "Phi-3.5-mini instruct (128k)",
    "microsoft/Phi-3.5-vision-instruct": "Phi-3.5-vision instruct (128k)",
    "microsoft/Phi-3-medium-128k-instruct": "Phi-3-medium instruct (128k)",
    "microsoft/Phi-3-medium-4k-instruct": "Phi-3-medium instruct (4k)",
    "microsoft/Phi-3-mini-128k-instruct": "Phi-3-mini instruct (128k)",
    "microsoft/Phi-3-small-128k-instruct": "Phi-3-small instruct (128k)",
    "microsoft/Phi-3-small-8k-instruct": "Phi-3-small instruct (8k)",
    "microsoft/Phi-4": "Phi-4",
    "microsoft/Phi-4-mini-instruct": "Phi-4-mini-instruct",
    "microsoft/Phi-4-multimodal-instruct": "Phi-4-multimodal-instruct",
    "ai21-labs/AI21-Jamba-1.5-Large": "AI21 Jamba 1.5 Large",
    "ai21-labs/AI21-Jamba-1.5-Mini": "AI21 Jamba 1.5 Mini",
    "mistral-ai/Codestral-2501": "Codestral 25.01",
    "cohere/Cohere-command-r": "Cohere Command R",
    "cohere/Cohere-command-r-08-2024": "Cohere Command R 08-2024",
    "cohere/Cohere-command-r-plus": "Cohere Command R+",
    "cohere/Cohere-command-r-plus-08-2024": "Cohere Command R+ 08-2024",
    "deepseek/DeepSeek-R1": "DeepSeek-R1",
    "deepseek/DeepSeek-V3-0324": "DeepSeek-V3-0324",
    "meta/Llama-3.2-11B-Vision-Instruct": "Llama-3.2-11B-Vision-Instruct",
    "meta/Llama-3.2-90B-Vision-Instruct": "Llama-3.2-90B-Vision-Instruct",
    "meta/Llama-3.3-70B-Instruct": "Llama-3.3-70B-Instruct",
    "meta/Llama-4-Maverick-17B-128E-Instruct-FP8": "Llama 4 Maverick 17B 128E Instruct FP8",
    "meta/Llama-4-Scout-17B-16E-Instruct": "Llama 4 Scout 17B 16E Instruct",
    "meta/Meta-Llama-3.1-405B-Instruct": "Meta-Llama-3.1-405B-Instruct",
    "meta/Meta-Llama-3.1-70B-Instruct": "Meta-Llama-3.1-70B-Instruct",
    "meta/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct",
    "meta/Meta-Llama-3-70B-Instruct": "Meta-Llama-3-70B-Instruct",
    "meta/Meta-Llama-3-8B-Instruct": "Meta-Llama-3-8B-Instruct",
    "mistral-ai/Ministral-3B": "Ministral 3B",
    "mistral-ai/Mistral-Large-2411": "Mistral Large 24.11",
    "mistral-ai/Mistral-Nemo": "Mistral Nemo",
    "mistral-ai/Mistral-large-2407": "Mistral Large (2407)",
    "mistral-ai/Mistral-small": "Mistral Small",
    "cohere/cohere-command-a": "Cohere Command A",
    "core42/jais-30b-chat": "JAIS 30b Chat",
    "mistral-ai/mistral-small-2503": "Mistral Small 3.1",
}

# Chat memory (in-memory, per chat ID)
chat_histories = defaultdict(list)
MAX_HISTORY = 100  # limit stored messages per chat to avoid unbounded memory growth

# Generate response stream
async def generate_ai_response(chat_id: str, model: str):
    token = os.getenv("GITHUB_TOKEN")
    if not token:
        raise HTTPException(status_code=500, detail="GitHub token not configured")

    if model not in AVAILABLE_MODELS:
        raise HTTPException(
            status_code=400,
            detail=f"Model not available. Choose from: {', '.join(AVAILABLE_MODELS.keys())}"
        )

    endpoint = "https://models.github.ai/inference"
    client = AsyncOpenAI(base_url=endpoint, api_key=token)

    full_response = ""
    try:
        stream = await asyncio.wait_for(
            client.chat.completions.create(
                messages=chat_histories[chat_id],
                model=model,
                temperature=1.0,
                top_p=1.0,
                stream=True
            ),
            timeout=60  # Prevent hangs while waiting for the stream to open
        )
        async for chunk in stream:
            if chunk.choices and chunk.choices[0].delta.content:
                content = chunk.choices[0].delta.content
                full_response += content
                yield content
        # Store the complete assistant reply once, rather than one history entry per chunk
        if full_response:
            chat_histories[chat_id].append({"role": "assistant", "content": full_response})
            chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]
    except asyncio.TimeoutError:
        # The response has already started streaming, so report errors in-band
        yield "Error: Response timed out."
    except Exception as err:
        yield f"Error: {str(err)}"

# Chat endpoint
@app.get("/chat")
async def generate_response(
    chat_id: str = Query(..., description="Unique chat ID"),
    prompt: str = Query(..., description="User message"),
    model: str = Query("openai/gpt-4.1-mini", description="Model to use")
):
    if not prompt:
        raise HTTPException(status_code=400, detail="Prompt cannot be empty")

    chat_histories[chat_id].append({"role": "user", "content": prompt})
    chat_histories[chat_id] = chat_histories[chat_id][-MAX_HISTORY:]

    return StreamingResponse(
        generate_ai_response(chat_id, model),
        media_type="text/event-stream"
    )

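# A minimal usage sketch (not wired into the app) of consuming the streaming
# /chat route above with httpx; the local server URL and the httpx dependency
# are assumptions, not part of the original application.
async def demo_stream_chat(prompt: str, chat_id: str = "demo") -> str:
    import httpx  # local import so the server itself does not depend on httpx

    text = ""
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "GET",
            "http://127.0.0.1:8000/chat",  # assumed local dev address
            params={"chat_id": chat_id, "prompt": prompt},
        ) as response:
            async for chunk in response.aiter_text():
                text += chunk  # each chunk is a plain-text piece of the model reply
    return text
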
# Optional: reset chat history
@app.post("/reset")
async def reset_chat(chat_id: str = Query(..., description="ID of chat to reset")):
    if chat_id in chat_histories:
        chat_histories[chat_id].clear()
        return {"message": f"Chat {chat_id} history reset."}
    raise HTTPException(status_code=404, detail="Chat ID not found")

# For ASGI servers like Uvicorn
def get_app():
    return app
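
# A minimal sketch for running the app directly; uvicorn, the port, and the
# environment setup shown below are assumptions rather than part of the
# original file.
#
#   export GITHUB_TOKEN=<your GitHub Models token>
#   uvicorn app:app --host 0.0.0.0 --port 7860   # assumes this file is saved as app.py
#
# Example requests against the routes registered above:
#   curl -N "http://127.0.0.1:7860/chat?chat_id=demo&prompt=Hello"
#   curl -X POST "http://127.0.0.1:7860/reset?chat_id=demo"
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)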