Update app.py
app.py CHANGED
@@ -9,9 +9,11 @@ model_cache = {}

 # Available models
 AVAILABLE_MODELS = {
-    "Apollo-1-4B": "
-    "Apollo-1-8B": "
-    "Apollo-1-2B": "
+    "Apollo-1-4B": "Loom-Labs/Apollo-1-4B",
+    "Apollo-1-8B": "Loom-Labs/Apollo-1-8B",
+    "Apollo-1-2B": "Loom-Labs/Apollo-1-2B",
+    "Daedalus-1-2B": "Loom-Labs/Daedalus-1-2B",
+    "Daedalus-1-8B": "Loom-Labs/Daedalus-1-8B",
 }

 @spaces.GPU
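This hunk only touches the registry; the loading path lives elsewhere in app.py. A minimal sketch of how AVAILABLE_MODELS and model_cache could feed the @spaces.GPU-decorated generate() follows — load_model(), the transformers calls, the dtype/device choices and the generation defaults are illustrative assumptions, not the Space's actual code.

# A minimal sketch, assuming a transformers-based lazy loader.
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_cache = {}  # name -> (model, tokenizer), reused across requests

AVAILABLE_MODELS = {
    "Apollo-1-4B": "Loom-Labs/Apollo-1-4B",
    "Apollo-1-8B": "Loom-Labs/Apollo-1-8B",
    "Apollo-1-2B": "Loom-Labs/Apollo-1-2B",
    "Daedalus-1-2B": "Loom-Labs/Daedalus-1-2B",
    "Daedalus-1-8B": "Loom-Labs/Daedalus-1-8B",
}

def load_model(model_name):
    """Load a repo once, then serve it from model_cache on later calls."""
    if model_name not in model_cache:
        repo_id = AVAILABLE_MODELS[model_name]
        tokenizer = AutoTokenizer.from_pretrained(repo_id)
        model = AutoModelForCausalLM.from_pretrained(
            repo_id, torch_dtype=torch.float16, device_map="auto"
        )
        model_cache[model_name] = (model, tokenizer)
    return model_cache[model_name]

@spaces.GPU  # ZeroGPU: a GPU is attached only while this function runs
def generate(message, model_name="Apollo-1-4B", max_new_tokens=512):
    model, tokenizer = load_model(model_name)
    inputs = tokenizer(message, return_tensors="pt").to(model.device)
    output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)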
@@ -188,14 +190,16 @@ def generate(
 def create_interface():
     with gr.Blocks(title="Multi-Model Chat") as demo:
         gr.Markdown("""
-        # π
+        # π Loom Labs Model Chat Interface

-        Chat with the
+        Chat with the models by Loom Labs.

         **Available Models:**
         - Apollo-1-4B (4 billion parameters)
         - Apollo-1-8B (8 billion parameters)
         - Apollo-1-2B (2 billion parameters)
+        - Daedalus-1-2B (2 billion parameters)
+        - Daedalus-1-8B (8 billion parameters)
         """)

         with gr.Row():
@@ -282,20 +286,6 @@ def create_interface():

         model_selector.change(model_changed, model_selector, chatbot)

-        gr.Markdown("""
-        ---
-
-        ### About the Apollo-1 Models
-        **Apollo-1-2B**: 2 billion parameter model by Noema Research, designed for fast and quick infrencing
-
-        **Apollo-1-4B**: 4 billion parameter model by Noema Research, optimisd for efficient conversation and text generation
-
-        **Apollo-1-8B**: 8 billion parameter model by Noema Research, offering enhanced capabilities and better performance for complex tasks
-
-        All models are designed for conversational AI and support various text generation tasks. The 8B model provides more sophisticated responses but requires more computational resources.
-
-        This Space uses ZeroGPU for efficient GPU allocation across both model sizes.
-        """)

         return demo

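The last hunk drops the "About the Apollo-1 Models" blurb but keeps the selector wiring. A hedged sketch of how model_selector.change(model_changed, model_selector, chatbot) plausibly fits together follows — the gr.Dropdown component, the body of model_changed, and the chat reset are assumptions; only the .change(...) call itself appears in the diff.

# Sketch of the selector wiring, assuming a Dropdown selector whose change
# handler clears the Chatbot when the user switches models.
import gradio as gr

AVAILABLE_MODELS = {"Apollo-1-4B": "Loom-Labs/Apollo-1-4B"}  # subset of the dict added above

def create_interface():
    with gr.Blocks(title="Multi-Model Chat") as demo:
        with gr.Row():
            model_selector = gr.Dropdown(
                choices=list(AVAILABLE_MODELS.keys()),
                value="Apollo-1-4B",
                label="Model",
            )
        chatbot = gr.Chatbot(label="Chat")

        def model_changed(model_name):
            # Start a fresh conversation so history from one model
            # is not replayed against another.
            return []

        model_selector.change(model_changed, model_selector, chatbot)

    return demo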