import tempfile

import gradio as gr
import requests
from huggingface_hub import InferenceClient
# ---------- Chat & Career Models ----------
MODELS = [
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "google/gemma-7b-it",
]
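# ai_chat below tries these models in the listed order and falls back to the
# next one whenever a request errors out, so the first reachable model answers.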
def ai_chat(message, history):
    if history is None:
        history = []
    for model in MODELS:
        try:
            client = InferenceClient(model)
            conversation = ""
            for h in history:
                conversation += f"User: {h[0]}\nStudyMate: {h[1]}\n"
            prompt = (
                "You are StudyMate AI, a multilingual tutor and career mentor. "
                "Always reply clearly in the same language as the user. "
                "If you are unsure, give your best explanation and encourage learning.\n\n"
                + conversation
                + f"User: {message}\nStudyMate:"
            )
            reply = ""
            # With stream=True (and details left at the default), text_generation
            # yields plain text chunks, so they can be concatenated directly.
            for chunk in client.text_generation(
                prompt, max_new_tokens=512, stream=True, temperature=0.7
            ):
                reply += chunk
            history.append((message, reply))
            return history, ""
        except Exception:
            continue
    history.append((message, "Sorry 😔 I couldn’t generate a full answer right now. Try again."))
    return history, ""
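# Hypothetical quick check (not wired into the UI): the chat function can be
# exercised directly, e.g.
#     history, _ = ai_chat("Explain Newton's first law simply", [])
#     print(history[-1][1])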
# ---------- Career Compass ----------
def career_compass(skill):
    if not skill:
        return "Please enter a skill or field."
    return (
        f"📘 **Career Plan for {skill.title()}**\n\n"
        f"**1️⃣ Learn:** Take beginner-to-advanced online courses (Coursera, YouTube, freeCodeCamp).\n"
        f"**2️⃣ Build:** Create 2-3 small portfolio projects related to {skill}.\n"
        f"**3️⃣ Connect:** Join LinkedIn groups or Pakistani Facebook communities for {skill}.\n"
        f"**4️⃣ Earn:** Search for remote jobs or gigs via StudyMate AI’s Job Finder tab.\n"
        f"**5️⃣ Scale:** Once confident, teach others — it multiplies learning.\n\n"
        f"✨ Tip: Consistency for 90 days = visible progress."
    )
# ---------- Voice ----------
def tts_generate(text):
    if not text:
        return None
    try:
        client = InferenceClient("coqui-ai/TTS-1-en")
        # text_to_speech returns raw audio bytes; write them to a temp file so a
        # filepath can be handed to the audio output component.
        audio_bytes = client.text_to_speech(text)
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(audio_bytes)
            return f.name
    except Exception:
        return None
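# The temp-file path returned above is what voice_to_text hands to the
# gr.Audio(type="filepath") reply component in the Chat tab below.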
# ---------- CSS (iOS Glass Theme + Animations) ----------
css_style = """
body {
    background: radial-gradient(circle at top left, #0a0f1c, #111827);
    overflow: hidden;
    font-family: 'Poppins', sans-serif;
    color: white;
}
.gradio-container {
    background: rgba(255, 255, 255, 0.05);
    backdrop-filter: blur(20px);
    border-radius: 24px;
    padding: 24px;
    box-shadow: 0 0 20px rgba(255,255,255,0.05);
    animation: fadeIn 1.5s ease-in;
}
button:hover {
    transform: scale(1.05);
    transition: 0.3s;
}
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to { opacity: 1; transform: translateY(0); }
}
/* Floating background icons */
.floating {
    position: absolute;
    width: 60px;
    height: 60px;
    opacity: 0.15;
    animation: float 12s ease-in-out infinite;
}
@keyframes float {
    0% { transform: translateY(0) rotate(0deg); }
    50% { transform: translateY(-30px) rotate(20deg); }
    100% { transform: translateY(0) rotate(0deg); }
}
#icon1 { top: 10%; left: 20%; animation-delay: 0s; }
#icon2 { top: 60%; left: 70%; animation-delay: 2s; }
#icon3 { top: 40%; left: 10%; animation-delay: 4s; }
.loader {
    display: inline-block;
    width: 20px;
    height: 20px;
    border: 3px solid rgba(255,255,255,0.3);
    border-radius: 50%;
    border-top-color: #ffffff;
    animation: spin 1s ease-in-out infinite;
}
@keyframes spin {
    to { transform: rotate(360deg); }
}
"""
# ---------- UI ----------
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=css_style) as demo:
    gr.HTML("""
    <div style='text-align:center; position:relative;'>
        <h1 style='font-size:2.5em; color:white;'>StudyMate AI</h1>
        <p style='opacity:0.7;'>Your smart study, career, and voice companion</p>
        <img id='icon1' class='floating' src='https://cdn-icons-png.flaticon.com/512/1048/1048945.png'/>
        <img id='icon2' class='floating' src='https://cdn-icons-png.flaticon.com/512/616/616408.png'/>
        <img id='icon3' class='floating' src='https://cdn-icons-png.flaticon.com/512/1828/1828778.png'/>
    </div>
    """)

    with gr.Tab("💬 Chat"):
        chatbot = gr.Chatbot(label="Ask anything", height=480)
        msg = gr.Textbox(placeholder="Type or speak your question…")
        voice_in = gr.Microphone(label="🎤 Speak", type="filepath")
        voice_out = gr.Audio(label="🔊 Reply Voice", type="filepath")
        clear = gr.Button("Clear Chat")

        msg.submit(ai_chat, [msg, chatbot], [chatbot, msg])
        clear.click(lambda: None, None, chatbot, queue=False)
        def voice_to_text(audio, chat_history):
            chat_history = chat_history or []
            if audio is None:
                return chat_history, None
            try:
                client = InferenceClient("openai/whisper-tiny.en")
                result = client.automatic_speech_recognition(audio)
                # Recent huggingface_hub versions return an output object with a
                # .text attribute rather than a plain dict.
                text = getattr(result, "text", "") or ""
                chat_history, _ = ai_chat(text, chat_history)
                reply_audio = tts_generate(chat_history[-1][1])
                return chat_history, reply_audio
            except Exception:
                chat_history.append(("", "Sorry, I couldn’t process voice input."))
                return chat_history, None

        voice_in.change(voice_to_text, [voice_in, chatbot], [chatbot, voice_out])
    with gr.Tab("🧭 Career Compass"):
        skill = gr.Textbox(label="Enter your skill or goal")
        plan_btn = gr.Button("Generate Career Plan")
        plan_output = gr.Markdown()
        plan_btn.click(career_compass, skill, plan_output)

demo.launch()