LeroyDyer committed on
Commit
cf605b8
·
verified ·
1 Parent(s): 12ef2cb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +487 -50
app.py CHANGED
@@ -4,7 +4,7 @@ from collections import defaultdict
4
  import json
5
  import os
6
  import re
7
- from time import time
8
  import uuid
9
  from typing import List, Dict, Any, Optional
10
  from dataclasses import dataclass
@@ -27,7 +27,6 @@ from openai import AsyncOpenAI, OpenAI
27
  import pyttsx3
28
  from rich.console import Console
29
 
30
-
31
  BASE_URL="http://localhost:1234/v1"
32
  BASE_API_KEY="not-needed"
33
  BASE_CLIENT = AsyncOpenAI(
@@ -44,15 +43,24 @@ console = Console()
44
  # --- Configuration ---
45
  LOCAL_BASE_URL = "http://localhost:1234/v1"
46
  LOCAL_API_KEY = "not-needed"
47
- # HuggingFace Spaces configuration
48
- HF_INFERENCE_URL = "https://api-inference.huggingface.co/models/"
49
- HF_API_KEY = os.getenv("HF_API_KEY", "")
50
-
51
  DEFAULT_TEMPERATURE = 0.7
52
  DEFAULT_MAX_TOKENS = 5000
53
  console = Console()
54
 
55
- #############################################################
 
 
 
 
 
 
 
 
 
56
  @dataclass
57
  class LLMMessage:
58
  role: str
@@ -61,7 +69,6 @@ class LLMMessage:
61
  conversation_id: str = None
62
  timestamp: float = None
63
  metadata: Dict[str, Any] = None
64
-
65
  def __post_init__(self):
66
  if self.message_id is None:
67
  self.message_id = str(uuid.uuid4())
@@ -75,7 +82,6 @@ class LLMRequest:
75
  message: LLMMessage
76
  response_event: str = None
77
  callback: Callable = None
78
-
79
  def __post_init__(self):
80
  if self.response_event is None:
81
  self.response_event = f"llm_response_{self.message.message_id}"
@@ -87,32 +93,27 @@ class LLMResponse:
87
  success: bool = True
88
  error: str = None
89
 
90
- #############################################################
91
  class EventManager:
92
  def __init__(self):
93
  self._handlers = defaultdict(list)
94
  self._lock = threading.Lock()
95
-
96
  def register(self, event: str, handler: Callable):
97
  with self._lock:
98
  self._handlers[event].append(handler)
99
-
100
  def unregister(self, event: str, handler: Callable):
101
  with self._lock:
102
  if event in self._handlers and handler in self._handlers[event]:
103
  self._handlers[event].remove(handler)
104
-
105
  def raise_event(self, event: str, data: Any):
106
  with self._lock:
107
  handlers = self._handlers[event][:]
108
-
109
  for handler in handlers:
110
  try:
111
  handler(data)
112
  except Exception as e:
113
  console.log(f"Error in event handler for {event}: {e}", style="bold red")
114
 
115
-
116
  EVENT_MANAGER = EventManager()
117
  def RegisterEvent(event: str, handler: Callable):
118
  EVENT_MANAGER.register(event, handler)
@@ -124,20 +125,6 @@ def UnregisterEvent(event: str, handler: Callable):
124
  EVENT_MANAGER.unregister(event, handler)
125
 
126
 
127
- #############################################################
128
- @dataclass
129
- class CanvasArtifact:
130
- id: str
131
- type: str # 'code', 'diagram', 'text', 'image'
132
- content: str
133
- title: str
134
- timestamp: float
135
- metadata: Dict[str, Any] = None
136
-
137
- def __post_init__(self):
138
- if self.metadata is None:
139
- self.metadata = {}
140
-
141
  class LLMAgent:
142
  """Main Agent Driver !
143
  Agent For Multiple messages at once ,
@@ -235,7 +222,7 @@ class LLMAgent:
235
  """Default generate function if none provided"""
236
  return await self.openai_generate(messages)
237
  def create_interface(self):
238
- """Create the full LCARS-styled interface without HuggingFace options"""
239
  lcars_css = """
240
  :root {
241
  --lcars-orange: #FF9900;
@@ -354,11 +341,37 @@ class LLMAgent:
354
  with gr.Column(scale=1):
355
  # Configuration Panel
356
  with gr.Column(elem_classes="lcars-panel"):
357
-
358
- pass
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
359
  # Canvas Artifacts
360
  with gr.Column(elem_classes="lcars-panel"):
361
- gr.Markdown("""### 🎨 CANVAS ARTIFACTS""")
362
  artifact_display = gr.JSON(label="")
363
  with gr.Row():
364
  refresh_artifacts_btn = gr.Button("πŸ”„ Refresh", elem_classes="lcars-button")
@@ -367,8 +380,8 @@ class LLMAgent:
367
  with gr.Column(scale=2):
368
  # Code Canvas
369
  with gr.Accordion("πŸ’» COLLABORATIVE CODE CANVAS", open=False):
370
- code_editor = gr.Code(interactive=True,
371
- value="# Welcome to LCARS Collaborative Canvas\nprint('Hello, Starfleet!')",
372
  language="python",
373
  lines=15,
374
  label=""
@@ -379,7 +392,7 @@ class LLMAgent:
379
  optimize_btn = gr.Button("⚑ Optimize", elem_classes="lcars-button")
380
  # Chat Interface
381
  with gr.Column(elem_classes="lcars-panel"):
382
- gr.Markdown("""### πŸ’¬ MISSION LOG""")
383
  chatbot = gr.Chatbot(label="", height=300)
384
  with gr.Row():
385
  message_input = gr.Textbox(
@@ -400,13 +413,27 @@ class LLMAgent:
400
  clear_chat_btn = gr.Button("πŸ—‘οΈ Clear Chat", elem_classes="lcars-button")
401
  new_session_btn = gr.Button("πŸ†• New Session", elem_classes="lcars-button")
402
 
403
- # Event handlers are connected here, no change needed
404
- async def process_message(message, history, speech_enabled=True):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
405
  if not message.strip():
406
  return "", history, "Please enter a message"
407
  history = history + [[message, None]]
408
  try:
409
- # Fixed: Uses the new chat_with_canvas method which includes canvas context
410
  response = await self.chat_with_canvas(
411
  message, self.current_conversation, include_canvas=True
412
  )
@@ -435,19 +462,26 @@ class LLMAgent:
435
  def new_session():
436
  self.clear_conversation(self.current_conversation)
437
  self.clear_canvas(self.current_conversation)
438
- return [], "# New session started\nprint('Ready!')", "πŸ†• New session started", []
439
 
440
  # Connect events
 
 
 
 
 
 
441
  send_btn.click(process_message,
442
- inputs=[message_input, chatbot],
443
  outputs=[message_input, chatbot, status_display, artifact_display])
444
  message_input.submit(process_message,
445
- inputs=[message_input, chatbot],
446
  outputs=[message_input, chatbot, status_display, artifact_display])
447
  refresh_artifacts_btn.click(get_artifacts, outputs=artifact_display)
448
  clear_canvas_btn.click(clear_canvas, outputs=[artifact_display, status_display])
449
  clear_chat_btn.click(clear_chat, outputs=[chatbot, status_display])
450
  new_session_btn.click(new_session, outputs=[chatbot, code_editor, status_display, artifact_display])
 
451
  return interface
452
 
453
  def _register_event_handlers(self):
@@ -949,27 +983,430 @@ class LLMAgent:
949
  messages.insert(0, {"role": "system", "content": canvas_context})
950
 
951
  return await self.chat(messages)
952
-
953
 
 
 
 
954
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
955
 
956
 
957
  console = Console()
958
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
959
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
960
 
961
  # --- Main Application ---
962
  def main():
963
  console.log("[bold blue]πŸš€ Starting LCARS Terminal...[/bold blue]")
964
- is_space = os.getenv('SPACE_ID') is not None
965
- if is_space:
966
- console.log("[green]🌐 Detected HuggingFace Space[/green]")
967
- else:
968
- console.log("[blue]πŸ’» Running locally[/blue]")
969
- interface = LLMAgent()
970
  demo = interface.create_interface()
971
  demo.launch(
972
- share=is_space
973
  )
974
 
975
  if __name__ == "__main__":
 
4
  import json
5
  import os
6
  import re
7
+ import time
8
  import uuid
9
  from typing import List, Dict, Any, Optional
10
  from dataclasses import dataclass
 
27
  import pyttsx3
28
  from rich.console import Console
29
 
 
30
  BASE_URL="http://localhost:1234/v1"
31
  BASE_API_KEY="not-needed"
32
  BASE_CLIENT = AsyncOpenAI(
 
43
  # --- Configuration ---
44
  LOCAL_BASE_URL = "http://localhost:1234/v1"
45
  LOCAL_API_KEY = "not-needed"
46
+ # Available model options
47
+ MODEL_OPTIONS = {
48
+ "Local LM Studio": LOCAL_BASE_URL,
49
+ }
50
  DEFAULT_TEMPERATURE = 0.7
51
  DEFAULT_MAX_TOKENS = 5000
52
  console = Console()
53
 
54
# --- Canvas Artifact Support ---
@dataclass
class CanvasArtifact:
    """One artifact stored on the collaborative canvas.

    Fields mirror what the UI displays: a short id, the artifact kind,
    its raw content, a display title, and the creation time.
    """
    id: str           # short unique identifier (callers use a uuid4 prefix)
    type: str         # 'code', 'diagram', 'text', 'image'
    content: str      # raw artifact payload
    title: str        # human-readable display title
    timestamp: float  # creation time, seconds since the epoch
    # FIX: the previous revision defaulted metadata (None + __post_init__);
    # this revision dropped the default, making metadata a required argument.
    # Restore an optional metadata dict via default_factory (a shared mutable
    # default would be a bug, so field(default_factory=dict) is used).
    metadata: Dict[str, Any] = field(default_factory=dict)
63
+
64
  @dataclass
65
  class LLMMessage:
66
  role: str
 
69
  conversation_id: str = None
70
  timestamp: float = None
71
  metadata: Dict[str, Any] = None
 
72
  def __post_init__(self):
73
  if self.message_id is None:
74
  self.message_id = str(uuid.uuid4())
 
82
  message: LLMMessage
83
  response_event: str = None
84
  callback: Callable = None
 
85
  def __post_init__(self):
86
  if self.response_event is None:
87
  self.response_event = f"llm_response_{self.message.message_id}"
 
93
  success: bool = True
94
  error: str = None
95
 
96
# --- Event Manager (copied from your original code or imported) ---
class EventManager:
    """Thread-safe publish/subscribe hub mapping event names to handler lists."""

    def __init__(self):
        # defaultdict avoids key-existence checks on registration.
        self._handlers = defaultdict(list)
        self._lock = threading.Lock()

    def register(self, event: str, handler: Callable):
        """Subscribe *handler* to *event*."""
        with self._lock:
            self._handlers[event].append(handler)

    def unregister(self, event: str, handler: Callable):
        """Remove *handler* from *event* if it is currently subscribed."""
        with self._lock:
            subscribed = self._handlers.get(event)
            if subscribed and handler in subscribed:
                subscribed.remove(handler)

    def raise_event(self, event: str, data: Any):
        """Invoke every handler subscribed to *event*, isolating failures.

        A snapshot of the handler list is taken under the lock so handlers
        may register/unregister from within a callback without deadlocking.
        """
        with self._lock:
            snapshot = list(self._handlers[event])
        for subscriber in snapshot:
            try:
                subscriber(data)
            except Exception as e:
                console.log(f"Error in event handler for {event}: {e}", style="bold red")
116
 
 
117
  EVENT_MANAGER = EventManager()
118
  def RegisterEvent(event: str, handler: Callable):
119
  EVENT_MANAGER.register(event, handler)
 
125
  EVENT_MANAGER.unregister(event, handler)
126
 
127
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
  class LLMAgent:
129
  """Main Agent Driver !
130
  Agent For Multiple messages at once ,
 
222
  """Default generate function if none provided"""
223
  return await self.openai_generate(messages)
224
  def create_interface(self):
225
+ """Create the full LCARS-styled interface"""
226
  lcars_css = """
227
  :root {
228
  --lcars-orange: #FF9900;
 
341
  with gr.Column(scale=1):
342
  # Configuration Panel
343
  with gr.Column(elem_classes="lcars-panel"):
344
+ gr.Markdown("### πŸ”§ LM STUDIO CONFIGURATION")
345
+ # Local LM Studio settings
346
+ with gr.Row():
347
+ base_url = gr.Textbox(
348
+ value=BASE_URL,
349
+ label="LM Studio URL",
350
+ elem_classes="lcars-input"
351
+ )
352
+ api_key = gr.Textbox(
353
+ value=BASE_API_KEY,
354
+ label="API Key",
355
+ type="password",
356
+ elem_classes="lcars-input"
357
+ )
358
+ with gr.Row():
359
+ model_dropdown = gr.Dropdown(
360
+ choices=["Fetching models..."],
361
+ value="Fetching models...",
362
+ label="AI Model",
363
+ elem_classes="lcars-input"
364
+ )
365
+ fetch_models_btn = gr.Button("πŸ“‘ Fetch Models", elem_classes="lcars-button")
366
+ with gr.Row():
367
+ temperature = gr.Slider(0.0, 2.0, value=0.7, label="Temperature")
368
+ max_tokens = gr.Slider(128, 8192, value=2000, step=128, label="Max Tokens")
369
+ with gr.Row():
370
+ update_config_btn = gr.Button("πŸ’Ύ Apply Config", elem_classes="lcars-button")
371
+ speech_toggle = gr.Checkbox(value=True, label="πŸ”Š Speech Output")
372
  # Canvas Artifacts
373
  with gr.Column(elem_classes="lcars-panel"):
374
+ gr.Markdown("### 🎨 CANVAS ARTIFACTS")
375
  artifact_display = gr.JSON(label="")
376
  with gr.Row():
377
  refresh_artifacts_btn = gr.Button("πŸ”„ Refresh", elem_classes="lcars-button")
 
380
  with gr.Column(scale=2):
381
  # Code Canvas
382
  with gr.Accordion("πŸ’» COLLABORATIVE CODE CANVAS", open=False):
383
+ code_editor = gr.Code(
384
+ value="# Welcome to LCARS Collaborative Canvas\\nprint('Hello, Starfleet!')",
385
  language="python",
386
  lines=15,
387
  label=""
 
392
  optimize_btn = gr.Button("⚑ Optimize", elem_classes="lcars-button")
393
  # Chat Interface
394
  with gr.Column(elem_classes="lcars-panel"):
395
+ gr.Markdown("### πŸ’¬ MISSION LOG")
396
  chatbot = gr.Chatbot(label="", height=300)
397
  with gr.Row():
398
  message_input = gr.Textbox(
 
413
  clear_chat_btn = gr.Button("πŸ—‘οΈ Clear Chat", elem_classes="lcars-button")
414
  new_session_btn = gr.Button("πŸ†• New Session", elem_classes="lcars-button")
415
 
416
+ # === EVENT HANDLERS ===
417
+ async def fetch_models_updated(base_url_val, api_key_val):
418
+ models = await LLMAgent.fetch_available_models(base_url_val, api_key_val)
419
+ if models:
420
+ return gr.update(choices=models, value=models[0])
421
+ return gr.update(choices=["No models found"])
422
+
423
+ def update_agent_connection(model_id, base_url_val, api_key_val):
424
+ self.agent = LLMAgent(
425
+ model_id=model_id,
426
+ base_url=base_url_val,
427
+ api_key=api_key_val,
428
+ generate_fn=LLMAgent.openai_generate
429
+ )
430
+ return f"βœ… Connected to LM Studio: {model_id}"
431
+
432
+ async def process_message(message, history, speech_enabled):
433
  if not message.strip():
434
  return "", history, "Please enter a message"
435
  history = history + [[message, None]]
436
  try:
 
437
  response = await self.chat_with_canvas(
438
  message, self.current_conversation, include_canvas=True
439
  )
 
462
  def new_session():
463
  self.clear_conversation(self.current_conversation)
464
  self.clear_canvas(self.current_conversation)
465
+ return [], "# New session started\\nprint('Ready!')", "πŸ†• New session started", []
466
 
467
  # Connect events
468
+ fetch_models_btn.click(fetch_models_updated,
469
+ inputs=[base_url, api_key],
470
+ outputs=model_dropdown)
471
+ update_config_btn.click(update_agent_connection,
472
+ inputs=[model_dropdown, base_url, api_key],
473
+ outputs=status_display)
474
  send_btn.click(process_message,
475
+ inputs=[message_input, chatbot, speech_toggle],
476
  outputs=[message_input, chatbot, status_display, artifact_display])
477
  message_input.submit(process_message,
478
+ inputs=[message_input, chatbot, speech_toggle],
479
  outputs=[message_input, chatbot, status_display, artifact_display])
480
  refresh_artifacts_btn.click(get_artifacts, outputs=artifact_display)
481
  clear_canvas_btn.click(clear_canvas, outputs=[artifact_display, status_display])
482
  clear_chat_btn.click(clear_chat, outputs=[chatbot, status_display])
483
  new_session_btn.click(new_session, outputs=[chatbot, code_editor, status_display, artifact_display])
484
+ interface.load(get_artifacts, outputs=artifact_display)
485
  return interface
486
 
487
  def _register_event_handlers(self):
 
983
  messages.insert(0, {"role": "system", "content": canvas_context})
984
 
985
  return await self.chat(messages)
 
986
 
987
+
988
+
989
+ console = Console()
990
 
991
# --- Canvas Artifact Support ---
# NOTE(review): CanvasArtifact is also defined earlier in this module; this
# later definition shadows the first — consider deduplicating.
@dataclass
class CanvasArtifact:
    """One artifact stored on the collaborative canvas.

    Fields mirror what the UI displays: a short id, the artifact kind,
    its raw content, a display title, and the creation time.
    """
    id: str           # short unique identifier (callers use a uuid4 prefix)
    type: str         # 'code', 'diagram', 'text', 'image'
    content: str      # raw artifact payload
    title: str        # human-readable display title
    timestamp: float  # creation time, seconds since the epoch
    # FIX: restore the optional-metadata behavior of the previous revision
    # (None + __post_init__) using a per-instance empty dict default.
    metadata: Dict[str, Any] = field(default_factory=dict)
1000
+
1001
class EnhancedAIAgent:
    """
    Wrapper around your AI_Agent that adds canvas/artifact management
    without modifying the original agent.

    Artifacts are stored per conversation id; code blocks found in model
    responses are auto-extracted onto the canvas.
    """

    def __init__(self, ai_agent):
        """Wrap *ai_agent*, adding per-conversation canvas storage.

        The wrapped agent is presumably expected to expose an async
        ``multi_turn_chat(message)`` method — TODO confirm against callers.
        """
        self.agent = ai_agent
        # conversation_id -> chronological list of CanvasArtifact.
        self.canvas_artifacts: Dict[str, List["CanvasArtifact"]] = {}
        # Oldest artifacts beyond this cap are discarded.
        self.max_canvas_artifacts = 50
        console.log("[bold green]βœ“ Enhanced AI Agent wrapper initialized[/bold green]")

    def add_artifact_to_canvas(self, conversation_id: str, content: str,
                               artifact_type: str = "code",
                               title: Optional[str] = None) -> "CanvasArtifact":
        """Add an artifact to the collaborative canvas and return it.

        FIX: annotated *title* as Optional[str] (it defaults to None);
        a generated title is used when none is supplied.
        """
        if conversation_id not in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []

        artifact = CanvasArtifact(
            id=str(uuid.uuid4())[:8],  # short id for display
            type=artifact_type,
            content=content,
            title=title or f"{artifact_type}_{len(self.canvas_artifacts[conversation_id]) + 1}",
            timestamp=time.time(),
            metadata={"conversation_id": conversation_id}
        )

        self.canvas_artifacts[conversation_id].append(artifact)

        # Keep only recent artifacts
        if len(self.canvas_artifacts[conversation_id]) > self.max_canvas_artifacts:
            self.canvas_artifacts[conversation_id] = self.canvas_artifacts[conversation_id][-self.max_canvas_artifacts:]

        console.log(f"[green]Added artifact to canvas: {artifact.title}[/green]")
        return artifact

    def get_canvas_context(self, conversation_id: str) -> str:
        """Get formatted canvas context for LLM prompts.

        Returns "" when the conversation has no artifacts; otherwise a
        banner-wrapped preview of the last 10 artifacts (500 chars each).
        """
        if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
            return ""

        context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
        for artifact in self.canvas_artifacts[conversation_id][-10:]:  # Last 10 artifacts
            context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
            preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
            context_lines.append(preview)

        return "\n".join(context_lines) + "\n=================================\n"

    async def chat_with_canvas(self, message: str, conversation_id: str = "default",
                               include_canvas: bool = True) -> str:
        """Enhanced chat that prepends canvas context to the user message.

        On failure the error text is returned (and logged) rather than raised,
        so the UI always receives a displayable string.
        """
        full_message = message
        if include_canvas:
            canvas_context = self.get_canvas_context(conversation_id)
            if canvas_context:
                full_message = f"{canvas_context}\n\nUser Query: {message}"

        try:
            # Use your original agent's multi_turn_chat method
            response = await self.agent.multi_turn_chat(full_message)
            # Auto-extract and add code artifacts to canvas
            self._extract_artifacts_to_canvas(response, conversation_id)
            return response
        except Exception as e:
            error_msg = f"Error in chat_with_canvas: {str(e)}"
            console.log(f"[red]{error_msg}[/red]")
            return error_msg

    def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
        """Automatically extract fenced code blocks and add them to the canvas."""
        # Optional language tag is captured from the opening fence.
        # FIX: dropped the unused enumerate() index from the original loop.
        code_blocks = re.findall(r'```(?:(\w+)\n)?(.*?)```', response, re.DOTALL)
        for lang, code_block in code_blocks:
            if len(code_block.strip()) > 10:  # Only add substantial code blocks
                self.add_artifact_to_canvas(
                    conversation_id,
                    code_block.strip(),
                    "code",
                    f"code_snippet_{lang or 'unknown'}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}"
                )

    def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
        """Get a display-ready summary of canvas artifacts, newest first."""
        if conversation_id not in self.canvas_artifacts:
            return []

        artifacts = []
        for artifact in reversed(self.canvas_artifacts[conversation_id]):  # Newest first
            artifacts.append({
                "id": artifact.id,
                "type": artifact.type.upper(),
                "title": artifact.title,
                "preview": artifact.content[:100] + "..." if len(artifact.content) > 100 else artifact.content,
                "timestamp": time.strftime("%H:%M:%S", time.localtime(artifact.timestamp))
            })

        return artifacts

    def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional["CanvasArtifact"]:
        """Return the artifact with *artifact_id*, or None if absent."""
        if conversation_id not in self.canvas_artifacts:
            return None
        for artifact in self.canvas_artifacts[conversation_id]:
            if artifact.id == artifact_id:
                return artifact
        return None

    def clear_canvas(self, conversation_id: str = "default"):
        """Drop every artifact stored for *conversation_id*."""
        if conversation_id in self.canvas_artifacts:
            self.canvas_artifacts[conversation_id] = []
            console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]")

    def get_latest_code_artifact(self, conversation_id: str) -> Optional[str]:
        """Return the most recent 'code' artifact's content, or None."""
        if conversation_id not in self.canvas_artifacts:
            return None
        for artifact in reversed(self.canvas_artifacts[conversation_id]):
            if artifact.type == "code":
                return artifact.content
        return None
1128
 
1129
 
1130
  console = Console()
1131
 
1132
# --- LCARS Styled Gradio Interface ---
class LcarsInterface:
    """Gradio front-end for the local LM Studio agent, styled after LCARS.

    Builds the chat/canvas UI and wires its controls to an LLMAgent; this
    build targets local LM Studio only (no HuggingFace inference).
    """

    def __init__(self):
        # Local LM Studio only
        self.use_huggingface = False
        # NOTE(review): this passes the unbound class attribute
        # LLMAgent.openai_generate as generate_fn — confirm LLMAgent binds
        # or invokes it with an explicit instance.
        self.agent = LLMAgent(generate_fn=LLMAgent.openai_generate)
        self.current_conversation = "default"

    def create_interface(self):
        """Create the full LCARS-styled interface and return the gr.Blocks app."""
        lcars_css = """
        :root {
            --lcars-orange: #FF9900;
            --lcars-red: #FF0033;
            --lcars-blue: #6699FF;
            --lcars-purple: #CC99FF;
            --lcars-pale-blue: #99CCFF;
            --lcars-black: #000000;
            --lcars-dark-blue: #3366CC;
            --lcars-gray: #424242;
            --lcars-yellow: #FFFF66;
        }
        body {
            background: var(--lcars-black);
            color: var(--lcars-orange);
            font-family: 'Antonio', 'LCD', 'Courier New', monospace;
            margin: 0;
            padding: 0;
        }
        .gradio-container {
            background: var(--lcars-black) !important;
            min-height: 100vh;
        }
        .lcars-container {
            background: var(--lcars-black);
            border: 4px solid var(--lcars-orange);
            border-radius: 0 30px 0 0;
            min-height: 100vh;
            padding: 20px;
        }
        .lcars-header {
            background: linear-gradient(90deg, var(--lcars-red), var(--lcars-orange));
            padding: 20px 40px;
            border-radius: 0 60px 0 0;
            margin: -20px -20px 20px -20px;
            border-bottom: 6px solid var(--lcars-blue);
        }
        .lcars-title {
            font-size: 2.5em;
            font-weight: bold;
            color: var(--lcars-black);
            margin: 0;
        }
        .lcars-subtitle {
            font-size: 1.2em;
            color: var(--lcars-black);
            margin: 10px 0 0 0;
        }
        .lcars-panel {
            background: rgba(66, 66, 66, 0.9);
            border: 2px solid var(--lcars-orange);
            border-radius: 0 20px 0 20px;
            padding: 15px;
            margin-bottom: 15px;
        }
        .lcars-button {
            background: var(--lcars-orange);
            color: var(--lcars-black) !important;
            border: none !important;
            border-radius: 0 15px 0 15px !important;
            padding: 10px 20px !important;
            font-family: inherit !important;
            font-weight: bold !important;
            margin: 5px !important;
        }
        .lcars-button:hover {
            background: var(--lcars-red) !important;
        }
        .lcars-input {
            background: var(--lcars-black) !important;
            color: var(--lcars-orange) !important;
            border: 2px solid var(--lcars-blue) !important;
            border-radius: 0 10px 0 10px !important;
            padding: 10px !important;
        }
        .lcars-chatbot {
            background: var(--lcars-black) !important;
            border: 2px solid var(--lcars-purple) !important;
            border-radius: 0 15px 0 15px !important;
        }
        .status-indicator {
            display: inline-block;
            width: 12px;
            height: 12px;
            border-radius: 50%;
            background: var(--lcars-red);
            margin-right: 8px;
        }
        .status-online {
            background: var(--lcars-blue);
            animation: pulse 2s infinite;
        }
        @keyframes pulse {
            0% { opacity: 1; }
            50% { opacity: 0.5; }
            100% { opacity: 1; }
        }
        """
        with gr.Blocks(css=lcars_css, theme=gr.themes.Default(), title="LCARS Terminal") as interface:
            with gr.Column(elem_classes="lcars-container"):
                # Header
                with gr.Row(elem_classes="lcars-header"):
                    gr.Markdown("""
                    <div style="text-align: center; width: 100%;">
                    <div class="lcars-title">πŸš€ LCARS TERMINAL</div>
                    <div class="lcars-subtitle">STARFLEET AI DEVELOPMENT CONSOLE</div>
                    <div style="margin-top: 10px;">
                    <span class="status-indicator status-online"></span>
                    <span style="color: var(--lcars-black); font-weight: bold;">SYSTEM ONLINE</span>
                    </div>
                    </div>
                    """)
                # Main Content
                with gr.Row():
                    # Left Sidebar
                    with gr.Column(scale=1):
                        # Configuration Panel
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### πŸ”§ LM STUDIO CONFIGURATION")
                            # Local LM Studio settings
                            with gr.Row():
                                base_url = gr.Textbox(
                                    value=LOCAL_BASE_URL,
                                    label="LM Studio URL",
                                    elem_classes="lcars-input"
                                )
                                api_key = gr.Textbox(
                                    value=LOCAL_API_KEY,
                                    label="API Key",
                                    type="password",
                                    elem_classes="lcars-input"
                                )
                            with gr.Row():
                                model_dropdown = gr.Dropdown(
                                    choices=["Fetching models..."],
                                    value="Fetching models...",
                                    label="AI Model",
                                    elem_classes="lcars-input"
                                )
                                fetch_models_btn = gr.Button("πŸ“‘ Fetch Models", elem_classes="lcars-button")
                            with gr.Row():
                                temperature = gr.Slider(0.0, 2.0, value=0.7, label="Temperature")
                                max_tokens = gr.Slider(128, 8192, value=2000, step=128, label="Max Tokens")
                            with gr.Row():
                                update_config_btn = gr.Button("πŸ’Ύ Apply Config", elem_classes="lcars-button")
                                speech_toggle = gr.Checkbox(value=True, label="πŸ”Š Speech Output")
                        # Canvas Artifacts
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### 🎨 CANVAS ARTIFACTS")
                            artifact_display = gr.JSON(label="")
                            with gr.Row():
                                refresh_artifacts_btn = gr.Button("πŸ”„ Refresh", elem_classes="lcars-button")
                                clear_canvas_btn = gr.Button("πŸ—‘οΈ Clear Canvas", elem_classes="lcars-button")
                    # Main Content Area
                    with gr.Column(scale=2):
                        # Code Canvas
                        with gr.Accordion("πŸ’» COLLABORATIVE CODE CANVAS", open=False):
                            # FIX: the starter snippet used a double-escaped "\\n",
                            # which displayed a literal backslash-n in the editor;
                            # use a real newline as in the previous revision.
                            code_editor = gr.Code(
                                value="# Welcome to LCARS Collaborative Canvas\nprint('Hello, Starfleet!')",
                                language="python",
                                lines=15,
                                label=""
                            )
                            with gr.Row():
                                load_to_chat_btn = gr.Button("πŸ’¬ Discuss Code", elem_classes="lcars-button")
                                analyze_btn = gr.Button("πŸ” Analyze", elem_classes="lcars-button")
                                optimize_btn = gr.Button("⚑ Optimize", elem_classes="lcars-button")
                        # Chat Interface
                        with gr.Column(elem_classes="lcars-panel"):
                            gr.Markdown("### πŸ’¬ MISSION LOG")
                            chatbot = gr.Chatbot(label="", height=300)
                            with gr.Row():
                                message_input = gr.Textbox(
                                    placeholder="Enter your command or query...",
                                    show_label=False,
                                    lines=2,
                                    scale=4
                                )
                                send_btn = gr.Button("πŸš€ SEND", elem_classes="lcars-button", scale=1)
                            # Status
                            with gr.Row():
                                status_display = gr.Textbox(
                                    value="LCARS terminal operational. Awaiting commands.",
                                    label="Status",
                                    max_lines=2
                                )
                                with gr.Column(scale=0):
                                    clear_chat_btn = gr.Button("πŸ—‘οΈ Clear Chat", elem_classes="lcars-button")
                                    new_session_btn = gr.Button("πŸ†• New Session", elem_classes="lcars-button")

                # === EVENT HANDLERS ===
                async def fetch_models_updated(base_url_val, api_key_val):
                    """Query LM Studio for available models and refresh the dropdown."""
                    models = await LLMAgent.fetch_available_models(base_url_val, api_key_val)
                    if models:
                        return gr.update(choices=models, value=models[0])
                    return gr.update(choices=["No models found"])

                def update_agent_connection(model_id, base_url_val, api_key_val):
                    """Rebuild the agent against the selected model/endpoint."""
                    self.agent = LLMAgent(
                        model_id=model_id,
                        base_url=base_url_val,
                        api_key=api_key_val,
                        generate_fn=LLMAgent.openai_generate
                    )
                    return f"βœ… Connected to LM Studio: {model_id}"

                async def process_message(message, history, speech_enabled):
                    """Send a chat turn through the agent and refresh the artifact view."""
                    if not message.strip():
                        # FIX: this handler is wired to 4 outputs; the empty-input
                        # path previously returned only 3 values.
                        return "", history, "Please enter a message", self.agent.get_canvas_summary(self.current_conversation)
                    history = history + [[message, None]]
                    try:
                        response = await self.agent.chat_with_canvas(
                            message, self.current_conversation, include_canvas=True
                        )
                        history[-1][1] = response
                        if speech_enabled and self.agent.speech_enabled:
                            self.agent.speak(response)
                        artifacts = self.agent.get_canvas_summary(self.current_conversation)
                        status = f"βœ… Response received. Canvas artifacts: {len(artifacts)}"
                        return "", history, status, artifacts
                    except Exception as e:
                        error_msg = f"❌ Error: {str(e)}"
                        history[-1][1] = error_msg
                        return "", history, error_msg, self.agent.get_canvas_summary(self.current_conversation)

                def get_artifacts():
                    """Return the current conversation's artifact summary."""
                    return self.agent.get_canvas_summary(self.current_conversation)

                def clear_canvas():
                    """Empty the canvas for the current conversation."""
                    self.agent.clear_canvas(self.current_conversation)
                    return [], "βœ… Canvas cleared"

                def clear_chat():
                    """Reset the chat history for the current conversation."""
                    self.agent.clear_conversation(self.current_conversation)
                    return [], "βœ… Chat cleared"

                def new_session():
                    """Reset chat, canvas, editor, and artifact list."""
                    self.agent.clear_conversation(self.current_conversation)
                    self.agent.clear_canvas(self.current_conversation)
                    # FIX: use a real newline (was a double-escaped "\\n").
                    return [], "# New session started\nprint('Ready!')", "πŸ†• New session started", []

                # Connect events
                fetch_models_btn.click(fetch_models_updated,
                                       inputs=[base_url, api_key],
                                       outputs=model_dropdown)
                update_config_btn.click(update_agent_connection,
                                        inputs=[model_dropdown, base_url, api_key],
                                        outputs=status_display)
                send_btn.click(process_message,
                               inputs=[message_input, chatbot, speech_toggle],
                               outputs=[message_input, chatbot, status_display, artifact_display])
                message_input.submit(process_message,
                                     inputs=[message_input, chatbot, speech_toggle],
                                     outputs=[message_input, chatbot, status_display, artifact_display])
                refresh_artifacts_btn.click(get_artifacts, outputs=artifact_display)
                clear_canvas_btn.click(clear_canvas, outputs=[artifact_display, status_display])
                clear_chat_btn.click(clear_chat, outputs=[chatbot, status_display])
                new_session_btn.click(new_session, outputs=[chatbot, code_editor, status_display, artifact_display])
                interface.load(get_artifacts, outputs=artifact_display)
        return interface
1402
 
1403
# --- Main Application ---
def main():
    """Build the LCARS terminal UI and serve it on the local machine."""
    console.log("[bold blue]πŸš€ Starting LCARS Terminal...[/bold blue]")
    terminal = LcarsInterface()
    app = terminal.create_interface()
    # Local deployment only; no public Gradio share link is created.
    app.launch(share=False)
1411
 
1412
  if __name__ == "__main__":