Speedofmastery committed on
Commit
3653724
·
verified ·
1 Parent(s): cdcafca

Upload folder using huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +105 -25
app.py CHANGED
@@ -8,17 +8,27 @@ from pathlib import Path
8
  from huggingface_hub import InferenceClient
9
 
10
  # Initialize HuggingFace Inference Client for real AI responses
11
- HF_TOKEN = os.getenv("HF_TOKEN", "") # Set in HuggingFace Space Settings -> Repository Secrets
 
 
12
  inference_client = InferenceClient(token=HF_TOKEN if HF_TOKEN else None)
13
 
14
  # Cloudflare configuration - credentials from wrangler.toml and CLI
15
  CLOUDFLARE_CONFIG = {
16
  "api_token": os.getenv("CLOUDFLARE_API_TOKEN", ""),
17
- "account_id": os.getenv("CLOUDFLARE_ACCOUNT_ID", "62af59a7ac82b29543577ee6800735ee"),
18
- "d1_database_id": os.getenv("CLOUDFLARE_D1_DATABASE_ID", "6d887f74-98ac-4db7-bfed-8061903d1f6c"),
 
 
 
 
19
  "r2_bucket_name": os.getenv("CLOUDFLARE_R2_BUCKET_NAME", "openmanus-storage"),
20
- "kv_namespace_id": os.getenv("CLOUDFLARE_KV_NAMESPACE_ID", "87f4aa01410d4fb19821f61006f94441"),
21
- "kv_namespace_cache": os.getenv("CLOUDFLARE_KV_CACHE_ID", "7b58c88292c847d1a82c8e0dd5129f37"),
 
 
 
 
22
  "durable_objects_sessions": "AGENT_SESSIONS",
23
  "durable_objects_chatrooms": "CHAT_ROOMS",
24
  }
@@ -452,17 +462,47 @@ def use_ai_model(model_name, input_text, user_session="guest"):
452
 
453
  # Determine model category for specialized handling
454
  category = "text"
455
- if any(x in model_lower for x in ["codellama", "starcoder", "codegen", "replit", "polycoder", "coder"]):
 
 
 
456
  category = "software_engineer"
457
- elif any(x in model_lower for x in ["flux", "diffusion", "stable-diffusion", "sdxl", "kandinsky"]):
 
 
 
458
  category = "image_gen"
459
- elif any(x in model_lower for x in ["pix2pix", "inpaint", "controlnet", "photomaker", "instantid"]):
 
 
 
460
  category = "image_edit"
461
- elif any(x in model_lower for x in ["math", "teacher", "education", "translate", "wizard"]) and "coder" not in model_lower:
 
 
 
 
 
 
462
  category = "education"
463
- elif any(x in model_lower for x in ["tts", "speech", "audio", "whisper", "wav2vec", "bark"]):
 
 
 
464
  category = "audio"
465
- elif any(x in model_lower for x in ["face", "avatar", "talking", "wav2lip", "vl", "blip", "vision", "llava"]):
 
 
 
 
 
 
 
 
 
 
 
 
466
  category = "multimodal"
467
 
468
  try:
@@ -472,14 +512,20 @@ def use_ai_model(model_name, input_text, user_session="guest"):
472
  response += f"πŸ“Έ Prompt: '{input_text}'\n\n"
473
  response += f"ℹ️ Image generation models require special handling. "
474
  response += f"The model '{model_name}' will create an image based on your prompt.\n\n"
475
- response += f"πŸ’‘ To view the generated image, use the Image Generation interface."
 
 
476
  return response
477
 
478
  elif category == "audio":
479
  response = f"🎡 {model_name} audio processing...\n\n"
480
  response += f"Input: '{input_text}'\n\n"
481
- response += f"ℹ️ Audio models require audio file input or special parameters. "
482
- response += f"Please use the Audio Processing interface for full functionality."
 
 
 
 
483
  return response
484
 
485
  else:
@@ -487,25 +533,53 @@ def use_ai_model(model_name, input_text, user_session="guest"):
487
  messages = []
488
 
489
  if category == "software_engineer":
490
- messages.append({"role": "system", "content": "You are an expert software engineer. Provide production-ready code with best practices, error handling, and clear documentation."})
 
 
 
 
 
491
  elif category == "education":
492
- messages.append({"role": "system", "content": "You are an expert AI teacher. Provide clear, step-by-step explanations with examples to help students understand."})
 
 
 
 
 
493
  elif category == "multimodal":
494
- messages.append({"role": "system", "content": "You are a multimodal AI assistant capable of understanding and describing visual content and complex queries."})
 
 
 
 
 
495
 
496
  messages.append({"role": "user", "content": input_text})
497
 
498
  # Call HuggingFace Inference API
499
  full_response = ""
500
  try:
501
- for message in inference_client.chat_completion(model=model_name, messages=messages, max_tokens=2000, temperature=0.7, stream=True):
 
 
 
 
 
 
502
  if message.choices and message.choices[0].delta.content:
503
  full_response += message.choices[0].delta.content
504
 
505
  if not full_response:
506
- full_response = "Model response was empty. Try rephrasing your prompt."
 
 
507
 
508
- icons = {"software_engineer": "πŸ’»", "education": "πŸŽ“", "multimodal": "πŸ€–", "text": "🧠"}
 
 
 
 
 
509
  icon = icons.get(category, "✨")
510
 
511
  return f"{icon} **{model_name}**\n\n{full_response}"
@@ -538,14 +612,20 @@ def get_cloudflare_status():
538
  services.append("βš™οΈ R2 Storage (Configure CLOUDFLARE_R2_BUCKET_NAME)")
539
 
540
  if CLOUDFLARE_CONFIG["kv_namespace_id"]:
541
- services.append("βœ… KV Cache Connected")
542
  else:
543
- services.append("βš™οΈ KV Cache (Configure CLOUDFLARE_KV_NAMESPACE_ID)")
544
 
545
- if CLOUDFLARE_CONFIG["durable_objects_id"]:
546
- services.append("βœ… Durable Objects Connected")
547
  else:
548
- services.append("βš™οΈ Durable Objects (Configure CLOUDFLARE_DURABLE_OBJECTS_ID)")
 
 
 
 
 
 
549
 
550
  return "\n".join(services)
551
 
 
8
  from huggingface_hub import InferenceClient
9
 
10
  # Initialize HuggingFace Inference Client for real AI responses
11
+ HF_TOKEN = os.getenv(
12
+ "HF_TOKEN", ""
13
+ ) # Set in HuggingFace Space Settings -> Repository Secrets
14
  inference_client = InferenceClient(token=HF_TOKEN if HF_TOKEN else None)
15
 
16
  # Cloudflare configuration - credentials from wrangler.toml and CLI
17
  CLOUDFLARE_CONFIG = {
18
  "api_token": os.getenv("CLOUDFLARE_API_TOKEN", ""),
19
+ "account_id": os.getenv(
20
+ "CLOUDFLARE_ACCOUNT_ID", "62af59a7ac82b29543577ee6800735ee"
21
+ ),
22
+ "d1_database_id": os.getenv(
23
+ "CLOUDFLARE_D1_DATABASE_ID", "6d887f74-98ac-4db7-bfed-8061903d1f6c"
24
+ ),
25
  "r2_bucket_name": os.getenv("CLOUDFLARE_R2_BUCKET_NAME", "openmanus-storage"),
26
+ "kv_namespace_id": os.getenv(
27
+ "CLOUDFLARE_KV_NAMESPACE_ID", "87f4aa01410d4fb19821f61006f94441"
28
+ ),
29
+ "kv_namespace_cache": os.getenv(
30
+ "CLOUDFLARE_KV_CACHE_ID", "7b58c88292c847d1a82c8e0dd5129f37"
31
+ ),
32
  "durable_objects_sessions": "AGENT_SESSIONS",
33
  "durable_objects_chatrooms": "CHAT_ROOMS",
34
  }
 
462
 
463
  # Determine model category for specialized handling
464
  category = "text"
465
+ if any(
466
+ x in model_lower
467
+ for x in ["codellama", "starcoder", "codegen", "replit", "polycoder", "coder"]
468
+ ):
469
  category = "software_engineer"
470
+ elif any(
471
+ x in model_lower
472
+ for x in ["flux", "diffusion", "stable-diffusion", "sdxl", "kandinsky"]
473
+ ):
474
  category = "image_gen"
475
+ elif any(
476
+ x in model_lower
477
+ for x in ["pix2pix", "inpaint", "controlnet", "photomaker", "instantid"]
478
+ ):
479
  category = "image_edit"
480
+ elif (
481
+ any(
482
+ x in model_lower
483
+ for x in ["math", "teacher", "education", "translate", "wizard"]
484
+ )
485
+ and "coder" not in model_lower
486
+ ):
487
  category = "education"
488
+ elif any(
489
+ x in model_lower
490
+ for x in ["tts", "speech", "audio", "whisper", "wav2vec", "bark"]
491
+ ):
492
  category = "audio"
493
+ elif any(
494
+ x in model_lower
495
+ for x in [
496
+ "face",
497
+ "avatar",
498
+ "talking",
499
+ "wav2lip",
500
+ "vl",
501
+ "blip",
502
+ "vision",
503
+ "llava",
504
+ ]
505
+ ):
506
  category = "multimodal"
507
 
508
  try:
 
512
  response += f"πŸ“Έ Prompt: '{input_text}'\n\n"
513
  response += f"ℹ️ Image generation models require special handling. "
514
  response += f"The model '{model_name}' will create an image based on your prompt.\n\n"
515
+ response += (
516
+ f"πŸ’‘ To view the generated image, use the Image Generation interface."
517
+ )
518
  return response
519
 
520
  elif category == "audio":
521
  response = f"🎡 {model_name} audio processing...\n\n"
522
  response += f"Input: '{input_text}'\n\n"
523
+ response += (
524
+ f"ℹ️ Audio models require audio file input or special parameters. "
525
+ )
526
+ response += (
527
+ f"Please use the Audio Processing interface for full functionality."
528
+ )
529
  return response
530
 
531
  else:
 
533
  messages = []
534
 
535
  if category == "software_engineer":
536
+ messages.append(
537
+ {
538
+ "role": "system",
539
+ "content": "You are an expert software engineer. Provide production-ready code with best practices, error handling, and clear documentation.",
540
+ }
541
+ )
542
  elif category == "education":
543
+ messages.append(
544
+ {
545
+ "role": "system",
546
+ "content": "You are an expert AI teacher. Provide clear, step-by-step explanations with examples to help students understand.",
547
+ }
548
+ )
549
  elif category == "multimodal":
550
+ messages.append(
551
+ {
552
+ "role": "system",
553
+ "content": "You are a multimodal AI assistant capable of understanding and describing visual content and complex queries.",
554
+ }
555
+ )
556
 
557
  messages.append({"role": "user", "content": input_text})
558
 
559
  # Call HuggingFace Inference API
560
  full_response = ""
561
  try:
562
+ for message in inference_client.chat_completion(
563
+ model=model_name,
564
+ messages=messages,
565
+ max_tokens=2000,
566
+ temperature=0.7,
567
+ stream=True,
568
+ ):
569
  if message.choices and message.choices[0].delta.content:
570
  full_response += message.choices[0].delta.content
571
 
572
  if not full_response:
573
+ full_response = (
574
+ "Model response was empty. Try rephrasing your prompt."
575
+ )
576
 
577
+ icons = {
578
+ "software_engineer": "πŸ’»",
579
+ "education": "πŸŽ“",
580
+ "multimodal": "πŸ€–",
581
+ "text": "🧠",
582
+ }
583
  icon = icons.get(category, "✨")
584
 
585
  return f"{icon} **{model_name}**\n\n{full_response}"
 
612
  services.append("βš™οΈ R2 Storage (Configure CLOUDFLARE_R2_BUCKET_NAME)")
613
 
614
  if CLOUDFLARE_CONFIG["kv_namespace_id"]:
615
+ services.append("βœ… KV Sessions Connected")
616
  else:
617
+ services.append("βš™οΈ KV Sessions (Configure CLOUDFLARE_KV_NAMESPACE_ID)")
618
 
619
+ if CLOUDFLARE_CONFIG["kv_namespace_cache"]:
620
+ services.append("βœ… KV Cache Connected")
621
  else:
622
+ services.append("βš™οΈ KV Cache (Configure CLOUDFLARE_KV_CACHE_ID)")
623
+
624
+ if CLOUDFLARE_CONFIG["durable_objects_sessions"]:
625
+ services.append("βœ… Durable Objects (Agent Sessions)")
626
+
627
+ if CLOUDFLARE_CONFIG["durable_objects_chatrooms"]:
628
+ services.append("βœ… Durable Objects (Chat Rooms)")
629
 
630
  return "\n".join(services)
631