# StudyMateAI / app.py
# (Hugging Face Hub page residue, commented out so the file parses:)
# BeyondJawad's picture
# Update app.py
# 3b4a91d verified
import gradio as gr
import requests
from huggingface_hub import InferenceClient
# ---------- Chat & Career Models ----------
# Hosted-inference models, tried in order; the first one that responds wins.
MODELS = [
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "mistralai/Mistral-7B-Instruct-v0.3",
    "google/gemma-7b-it",
]


def ai_chat(message, history):
    """Generate a StudyMate reply for *message* and append it to *history*.

    Parameters
    ----------
    message : str
        The user's latest input.
    history : list[tuple[str, str]] | None
        Prior (user, bot) turns as held by the gr.Chatbot; None on first call.

    Returns
    -------
    tuple[list[tuple[str, str]], str]
        The updated history and "" (which clears the input textbox).

    Each model in MODELS is tried in turn; any failure (network, auth,
    model busy) falls through to the next. If every model fails, a
    friendly apology is appended instead of raising.
    """
    if history is None:
        history = []
    # Flatten prior turns into a plain-text transcript once, outside the
    # model-retry loop (the prompt is loop-invariant).
    transcript = "".join(
        f"User: {user}\nStudyMate: {bot}\n" for user, bot in history
    )
    prompt = (
        "You are StudyMate AI, a multilingual tutor and career mentor. "
        "Always reply clearly in the same language as the user. "
        "If you are unsure, give your best explanation and encourage learning.\n\n"
        + transcript
        + f"User: {message}\nStudyMate:"
    )
    for model in MODELS:
        try:
            client = InferenceClient(model)
            parts = []
            for chunk in client.text_generation(
                prompt, max_new_tokens=512, stream=True, temperature=0.7
            ):
                # BUG FIX: with stream=True (and details left False) the
                # client yields plain strings, not objects with a .token
                # attribute; the old `reply += chunk.token` raised on the
                # first chunk, so every model "failed" and the apology
                # below was always returned. Accept both shapes.
                parts.append(chunk if isinstance(chunk, str) else chunk.token)
            history.append((message, "".join(parts)))
            return history, ""
        except Exception:
            # Deliberate best-effort fallback: try the next model.
            continue
    history.append((message, "Sorry 😔 I couldn’t generate a full answer right now. Try again."))
    return history, ""
# ---------- Career Compass ----------
def career_compass(skill):
    """Return a fixed five-step career roadmap for *skill*, as Markdown.

    An empty or missing skill yields a prompt to enter one instead.
    """
    if not skill:
        return "Please enter a skill or field."
    steps = [
        f"📘 **Career Plan for {skill.title()}**\n",
        "**1️⃣ Learn:** Take beginner-to-advanced online courses (Coursera, YouTube, freecodecamp).",
        f"**2️⃣ Build:** Create 2-3 small portfolio projects related to {skill}.",
        f"**3️⃣ Connect:** Join LinkedIn groups or Pakistani Facebook communities for {skill}.",
        "**4️⃣ Earn:** Search for remote jobs or gigs via StudyMate AI’s Job Finder tab.",
        "**5️⃣ Scale:** Once confident, teach others — it multiplies learning.\n",
        "✨ Tip: Consistency for 90 days = visible progress.",
    ]
    return "\n".join(steps)
# ---------- Voice ----------
# ---------- Voice ----------
def tts_generate(text):
    """Synthesize *text* to speech and return a playable audio file path.

    Returns None for empty input or on any inference failure; the
    gr.Audio output component renders None as "no audio".
    """
    if not text:
        return None
    try:
        import tempfile  # local: only needed on the success path

        client = InferenceClient("coqui-ai/TTS-1-en")
        # BUG FIX: InferenceClient has no `audio_generation` method — the
        # text-to-speech task is exposed as `text_to_speech`, which
        # returns raw audio bytes — so this function always raised and
        # returned None.
        audio_bytes = client.text_to_speech(text)
        # The consuming gr.Audio output uses type="filepath", so persist
        # the bytes to a temp file and return its path (the old code
        # returned a 1-tuple of bytes, which Gradio cannot play).
        # NOTE(review): served audio is assumed FLAC — confirm the
        # model's output format.
        with tempfile.NamedTemporaryFile(suffix=".flac", delete=False) as f:
            f.write(audio_bytes)
            return f.name
    except Exception:
        # Voice output is optional; fail silently rather than break chat.
        return None
# ---------- CSS (iOS Glass Theme + Animations) ----------
# Injected via gr.Blocks(css=css_style). Provides: dark radial background,
# frosted-glass container, hover-scale on buttons, a fade-in on load, the
# .floating/#icon1-3 animated background icons used by the header HTML, and
# a .loader spinner class.
# NOTE(review): `overflow: hidden` on body disables page scrolling — confirm
# that is intentional on small screens.
css_style = """
body {
background: radial-gradient(circle at top left, #0a0f1c, #111827);
overflow: hidden;
font-family: 'Poppins', sans-serif;
color: white;
}
.gradio-container {
background: rgba(255, 255, 255, 0.05);
backdrop-filter: blur(20px);
border-radius: 24px;
padding: 24px;
box-shadow: 0 0 20px rgba(255,255,255,0.05);
animation: fadeIn 1.5s ease-in;
}
button:hover {
transform: scale(1.05);
transition: 0.3s;
}
@keyframes fadeIn {
from { opacity: 0; transform: translateY(20px); }
to { opacity: 1; transform: translateY(0); }
}
/* Floating background icons */
.floating {
position: absolute;
width: 60px;
height: 60px;
opacity: 0.15;
animation: float 12s ease-in-out infinite;
}
@keyframes float {
0% { transform: translateY(0) rotate(0deg); }
50% { transform: translateY(-30px) rotate(20deg); }
100% { transform: translateY(0) rotate(0deg); }
}
#icon1 { top: 10%; left: 20%; animation-delay: 0s; }
#icon2 { top: 60%; left: 70%; animation-delay: 2s; }
#icon3 { top: 40%; left: 10%; animation-delay: 4s; }
.loader {
display: inline-block;
width: 20px;
height: 20px;
border: 3px solid rgba(255,255,255,0.3);
border-radius: 50%;
border-top-color: #ffffff;
animation: spin 1s ease-in-out infinite;
}
@keyframes spin {
to { transform: rotate(360deg); }
}
"""
# ---------- UI ----------
# Assembles the two-tab Gradio interface and launches it. Runs at import
# time (no __main__ guard), which is the usual convention for HF Spaces.
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue"), css=css_style) as demo:
    # Header banner; the three <img> tags are the background icons styled
    # and animated by css_style (.floating, #icon1-3).
    gr.HTML("""
<div style='text-align:center; position:relative;'>
<h1 style='font-size:2.5em; color:white;'>StudyMate AI</h1>
<p style='opacity:0.7;'>Your smart study, career, and voice companion</p>
<img id='icon1' class='floating' src='https://cdn-icons-png.flaticon.com/512/1048/1048945.png'/>
<img id='icon2' class='floating' src='https://cdn-icons-png.flaticon.com/512/616/616408.png'/>
<img id='icon3' class='floating' src='https://cdn-icons-png.flaticon.com/512/1828/1828778.png'/>
</div>
""")
    with gr.Tab("💬 Chat"):
        chatbot = gr.Chatbot(label="Ask anything", height=480)  # holds (user, bot) tuples
        msg = gr.Textbox(placeholder="Type or speak your question…")
        voice_in = gr.Microphone(label="🎤 Speak", type="filepath")   # recording arrives as a file path
        voice_out = gr.Audio(label="🔊 Reply Voice", type="filepath")
        clear = gr.Button("Clear Chat")
        # Enter in the textbox -> ai_chat(message, history) -> (history, "");
        # the returned "" clears the textbox.
        msg.submit(ai_chat, [msg, chatbot], [chatbot, msg])
        # Reset the chat display; queue=False so clearing is instant.
        clear.click(lambda: None, None, chatbot, queue=False)

        def voice_to_text(audio, chat_history):
            """Transcribe a recorded clip, answer it via ai_chat, and
            synthesize the reply as audio.

            Returns (updated chat history, reply audio or None/"").
            """
            if audio is None:
                return chat_history, ""
            try:
                client = InferenceClient("openai/whisper-tiny.en")
                result = client.automatic_speech_recognition(audio)
                # NOTE(review): assumes the ASR result is a dict with a
                # "text" key; newer huggingface_hub versions return an
                # object exposing `.text` — confirm the pinned hub version.
                text = result.get("text", "")
                chat_history, _ = ai_chat(text, chat_history)
                # NOTE(review): confirm tts_generate's return value is a
                # shape gr.Audio(type="filepath") can actually play.
                reply_audio = tts_generate(chat_history[-1][1])
                return chat_history, reply_audio
            except Exception:
                # Keep the UI alive on any STT/LLM/TTS failure.
                # NOTE(review): chat_history may be None on a first-ever
                # voice turn, which would make this append raise — verify.
                chat_history.append(("", "Sorry, I couldn’t process voice input."))
                return chat_history, None

        # Fires whenever the microphone component's value changes,
        # i.e. when a new recording is saved.
        voice_in.change(voice_to_text, [voice_in, chatbot], [chatbot, voice_out])

    with gr.Tab("🧭 Career Compass"):
        skill = gr.Textbox(label="Enter your skill or goal")
        plan_btn = gr.Button("Generate Career Plan")
        plan_output = gr.Markdown()
        plan_btn.click(career_compass, skill, plan_output)

demo.launch()