Files changed (1) hide show
  1. app.py +209 -173
app.py CHANGED
@@ -1,9 +1,10 @@
1
- import streamlit as st
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  from huggingface_hub import login
5
  import random
6
  import os
 
7
 
8
  # Get Hugging Face token from environment variables
9
  HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", "")
@@ -11,78 +12,9 @@ HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", "")
11
  if HUGGINGFACE_TOKEN:
12
  try:
13
  login(token=HUGGINGFACE_TOKEN)
14
- st.success("βœ… Successfully connected to Hugging Face")
15
  except Exception as e:
16
- st.error(f"❌ Hugging Face login failed: {e}")
17
-
18
- # Custom CSS for beautiful silver/sky blue theme
19
- st.markdown("""
20
- <style>
21
- .main {
22
- background: linear-gradient(135deg, #f8f9fa 0%, #e3f2fd 100%);
23
- font-family: 'Inter', sans-serif;
24
- }
25
-
26
- .stChatMessage {
27
- padding: 20px;
28
- border-radius: 16px;
29
- margin: 12px 0;
30
- box-shadow: 0 2px 8px rgba(0,0,0,0.1);
31
- border: none;
32
- }
33
-
34
- /* User message styling */
35
- .stChatMessage:has(div[data-testid="stChatMessageContent"] > div:first-child > div:first-child) {
36
- background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%);
37
- border-left: 6px solid #2196f3;
38
- }
39
-
40
- /* Assistant message styling */
41
- .stChatMessage:has(div[data-testid="stChatMessageContent"] > div:first-child > div:last-child) {
42
- background: linear-gradient(135deg, #ffffff 0%, #f5f5f5 100%);
43
- border-left: 6px solid #78909c;
44
- border: 1px solid #e0e0e0;
45
- }
46
-
47
- .stTextInput > div > div > input {
48
- border: 2px solid #90caf9;
49
- border-radius: 25px;
50
- padding: 15px 20px;
51
- font-size: 16px;
52
- background: white;
53
- box-shadow: 0 2px 10px rgba(33, 150, 243, 0.1);
54
- }
55
-
56
- .stButton > button {
57
- background: linear-gradient(135deg, #2196f3 0%, #1976d2 100%);
58
- color: white;
59
- border: none;
60
- border-radius: 25px;
61
- padding: 12px 28px;
62
- font-weight: 600;
63
- font-size: 16px;
64
- box-shadow: 0 4px 12px rgba(33, 150, 243, 0.3);
65
- transition: all 0.3s ease;
66
- }
67
-
68
- .stButton > button:hover {
69
- transform: translateY(-2px);
70
- box-shadow: 0 6px 16px rgba(33, 150, 243, 0.4);
71
- }
72
-
73
- .sidebar .sidebar-content {
74
- background: linear-gradient(180deg, #eceff1 0%, #cfd8dc 100%);
75
- border-right: 1px solid #b0bec5;
76
- }
77
-
78
- .header-gradient {
79
- background: linear-gradient(135deg, #2196f3 0%, #1976d2 100%);
80
- -webkit-background-clip: text;
81
- -webkit-text-fill-color: transparent;
82
- background-clip: text;
83
- }
84
- </style>
85
- """, unsafe_allow_html=True)
86
 
87
  # Personal and Company Information
88
  PERSONAL_INFO = {
@@ -142,27 +74,27 @@ class YAHBot:
142
  def _load_model(self):
143
  """Load the Hugging Face model"""
144
  try:
145
- with st.spinner("πŸ”„ Loading YAH Tech AI model..."):
146
- # Try to load custom model first
147
- self.tokenizer = AutoTokenizer.from_pretrained(
148
- "Adedoyinjames/YAH-Tech-Chat-Bot",
149
- token=HUGGINGFACE_TOKEN if HUGGINGFACE_TOKEN else None
150
- )
151
- self.model = AutoModelForSeq2SeqLM.from_pretrained(
152
- "Adedoyinjames/YAH-Tech-Chat-Bot",
153
- token=HUGGINGFACE_TOKEN if HUGGINGFACE_TOKEN else None,
154
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
155
- )
156
- st.success("βœ… YAH Tech AI model loaded successfully!")
157
  except Exception as e:
158
- st.error(f"❌ Failed to load custom model: {e}")
159
- st.info("πŸ”„ Falling back to standard FLAN-T5-base model...")
160
  try:
161
  self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
162
  self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
163
- st.success("βœ… Standard AI model loaded successfully!")
164
  except Exception as e2:
165
- st.error(f"❌ Failed to load AI model: {e2}")
166
  self.model = None
167
  self.tokenizer = None
168
 
@@ -197,22 +129,18 @@ class YAHBot:
197
  def _create_smart_prompt(self, user_input, conversation_history):
198
  """Create a smart prompt that combines brand identity with general knowledge"""
199
  brand_context = f"""You are YAH Bot, an AI assistant representing {PERSONAL_INFO['name']} and {COMPANY_INFO['name']}.
200
-
201
  About {PERSONAL_INFO['name']}:
202
  - {PERSONAL_INFO['role']}
203
  - Goals: {', '.join(PERSONAL_INFO['goals'])}
204
-
205
  About {COMPANY_INFO['name']}:
206
  - {COMPANY_INFO['type']}
207
  - Philosophy: "{COMPANY_INFO['philosophy']}"
208
  - Purpose: {COMPANY_INFO['purpose']}
209
-
210
  When answering questions:
211
  1. First provide accurate, helpful information
212
  2. Naturally mention YAH Tech or Adedoyin James when relevant
213
  3. Be professional but conversational
214
  4. Connect general topics to technology, business, or innovation when appropriate
215
-
216
  Current conversation:"""
217
 
218
  # Build conversation history
@@ -265,7 +193,7 @@ Current conversation:"""
265
  return cleaned_response
266
 
267
  except Exception as e:
268
- st.error(f"Model error: {str(e)}")
269
  return self._get_fallback_response(user_input)
270
 
271
  # Fallback if model fails to load
@@ -309,69 +237,193 @@ Current conversation:"""
309
  ]
310
  return random.choice(fallback_responses)
311
 
312
- def initialize_session_state():
313
- """Initialize session state variables"""
314
- if "messages" not in st.session_state:
315
- st.session_state.messages = []
316
- if "bot" not in st.session_state:
317
- st.session_state.bot = YAHBot()
318
- if "conversation_started" not in st.session_state:
319
- st.session_state.conversation_started = False
320
 
321
- def main():
322
- # Page configuration
323
- st.set_page_config(
324
- page_title="YAH Tech Assistant",
325
- page_icon="πŸš€",
326
- layout="centered",
327
- initial_sidebar_state="expanded"
328
- )
329
 
330
- # Initialize session state
331
- initialize_session_state()
 
 
 
 
 
 
332
 
333
- # Header with beautiful gradient
334
- col1, col2 = st.columns([1, 4])
335
- with col1:
336
- st.image("https://via.placeholder.com/100/2196f3/ffffff?text=YT", width=100)
337
- with col2:
338
- st.markdown('<h1 class="header-gradient">YAH Tech Assistant</h1>', unsafe_allow_html=True)
339
- st.caption("πŸš€ Powered by AI β€’ πŸ’Ό Venture Studio β€’ 🌍 Economic Innovation")
340
 
341
- st.markdown("---")
342
 
343
- # Sidebar
344
- with st.sidebar:
345
- st.header("🏒 About YAH Tech")
346
- st.markdown(f"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
  **πŸ‘€ Founder**: {PERSONAL_INFO['name']}
348
  **πŸ’Ό Role**: {PERSONAL_INFO['role']}
 
349
 
350
- **🏒 Company**: {COMPANY_INFO['name']}
351
- **🎯 Focus**: {COMPANY_INFO['type']}
352
- **πŸ“ˆ Stage**: {COMPANY_INFO['stage']}
353
-
354
- **πŸ’‘ Philosophy**: *{COMPANY_INFO['philosophy']}*
355
- """)
356
-
357
- st.markdown("---")
358
- st.subheader("πŸ’¬ Ask Me Anything")
359
- st.markdown("""
360
- **Examples:**
361
- - *"What's the capital of France?"* πŸ‡«πŸ‡·
362
- - *"Tell me about AI technology"* πŸ€–
363
- - *"How to start a business?"* πŸ’Ό
364
- - *"What is YAH Tech?"* 🏒
365
- - *"Who is Adedoyin James?"* πŸ‘€
366
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367
 
368
- if st.button("πŸ—‘οΈ Clear Chat History", use_container_width=True):
369
- st.session_state.messages = []
370
- st.session_state.conversation_started = False
371
- st.rerun()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
372
 
373
- # Chat interface
374
- if not st.session_state.conversation_started:
 
 
 
375
  welcome_msg = f"""
376
  πŸ‘‹ **Hello! I'm YAH Bot, your AI assistant for everything related to {PERSONAL_INFO['name']} and {COMPANY_INFO['name']}!**
377
 
@@ -386,33 +438,17 @@ def main():
386
 
387
  **How can I assist you today?** πŸš€
388
  """
389
- st.info(welcome_msg)
390
- st.session_state.conversation_started = True
391
-
392
- # Display chat messages
393
- for message in st.session_state.messages:
394
- with st.chat_message(message["role"]):
395
- st.markdown(message["content"])
396
 
397
- # Chat input
398
- if prompt := st.chat_input("πŸ’¬ Ask me anything - general knowledge or about YAH Tech..."):
399
- # Add user message to chat history
400
- st.session_state.messages.append({"role": "user", "content": prompt})
401
- with st.chat_message("user"):
402
- st.markdown(prompt)
403
-
404
- # Generate and display assistant response
405
- with st.chat_message("assistant"):
406
- with st.spinner("πŸ€” Thinking..."):
407
- try:
408
- response = st.session_state.bot.generate_response(prompt, st.session_state.messages)
409
- st.markdown(response)
410
- except Exception as e:
411
- error_msg = f"⚠️ I encountered an issue: {str(e)}\n\nBut at YAH Tech, we always find solutions! Feel free to ask me something else."
412
- st.markdown(error_msg)
413
- response = error_msg
414
-
415
- st.session_state.messages.append({"role": "assistant", "content": response})
416
 
 
 
 
417
  if __name__ == "__main__":
418
- main()
 
 
 
 
 
1
+ import gradio as gr
2
  import torch
3
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  from huggingface_hub import login
5
  import random
6
  import os
7
+ import json
8
 
9
  # Get Hugging Face token from environment variables
10
  HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", "")
 
12
  if HUGGINGFACE_TOKEN:
13
  try:
14
  login(token=HUGGINGFACE_TOKEN)
15
+ print("βœ… Successfully connected to Hugging Face")
16
  except Exception as e:
17
+ print(f"❌ Hugging Face login failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
  # Personal and Company Information
20
  PERSONAL_INFO = {
 
def _load_model(self):
    """Load the fine-tuned YAH Tech model, falling back to the base model.

    Attempts the custom "Adedoyinjames/YAH-Tech-Chat-Bot" checkpoint first
    (passing the Hub token when one is configured).  On any failure it
    falls back to ``self.model_name``; if that also fails, ``self.model``
    and ``self.tokenizer`` are set to None so callers can degrade to
    canned fallback responses instead of crashing.
    """
    try:
        print("πŸ”„ Loading YAH Tech AI model...")
        # Try to load custom model first
        self.tokenizer = AutoTokenizer.from_pretrained(
            "Adedoyinjames/YAH-Tech-Chat-Bot",
            token=HUGGINGFACE_TOKEN if HUGGINGFACE_TOKEN else None
        )
        self.model = AutoModelForSeq2SeqLM.from_pretrained(
            "Adedoyinjames/YAH-Tech-Chat-Bot",
            token=HUGGINGFACE_TOKEN if HUGGINGFACE_TOKEN else None,
            # Half precision on GPU to save memory; full precision on CPU.
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
        )
        print("βœ… YAH Tech AI model loaded successfully!")
    except Exception as e:
        print(f"❌ Failed to load custom model: {e}")
        print("πŸ”„ Falling back to standard FLAN-T5-base model...")
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
            print("βœ… Standard AI model loaded successfully!")
        except Exception as e2:
            print(f"❌ Failed to load AI model: {e2}")
            # Both loads failed: leave the bot model-less; generate_response
            # is expected to use its fallback path.
            self.model = None
            self.tokenizer = None
100
 
 
129
  def _create_smart_prompt(self, user_input, conversation_history):
130
  """Create a smart prompt that combines brand identity with general knowledge"""
131
  brand_context = f"""You are YAH Bot, an AI assistant representing {PERSONAL_INFO['name']} and {COMPANY_INFO['name']}.
 
132
  About {PERSONAL_INFO['name']}:
133
  - {PERSONAL_INFO['role']}
134
  - Goals: {', '.join(PERSONAL_INFO['goals'])}
 
135
  About {COMPANY_INFO['name']}:
136
  - {COMPANY_INFO['type']}
137
  - Philosophy: "{COMPANY_INFO['philosophy']}"
138
  - Purpose: {COMPANY_INFO['purpose']}
 
139
  When answering questions:
140
  1. First provide accurate, helpful information
141
  2. Naturally mention YAH Tech or Adedoyin James when relevant
142
  3. Be professional but conversational
143
  4. Connect general topics to technology, business, or innovation when appropriate
 
144
  Current conversation:"""
145
 
146
  # Build conversation history
 
193
  return cleaned_response
194
 
195
  except Exception as e:
196
+ print(f"Model error: {str(e)}")
197
  return self._get_fallback_response(user_input)
198
 
199
  # Fallback if model fails to load
 
237
  ]
238
  return random.choice(fallback_responses)
239
 
240
+ # Initialize the bot globally
241
+ yah_bot = YAHBot()
 
 
 
 
 
 
242
 
243
def chat_function(message, history):
    """Chat function for the Gradio interface.

    Args:
        message: The user's latest message (str).
        history: Gradio-style history, a list of (user, assistant) tuples.

    Returns:
        The assistant's reply as a string.
    """
    # Convert Gradio's (user, assistant) tuple history into the role-tagged
    # dict format that YAHBot.generate_response expects.
    conversation_history = []
    for human_msg, assistant_msg in history:
        conversation_history.append({"role": "user", "content": human_msg})
        conversation_history.append({"role": "assistant", "content": assistant_msg})

    # Generate response
    response = yah_bot.generate_response(message, conversation_history)
    return response
254
+
255
def api_chat_endpoint(message: str, history: list = None):
    """API endpoint function: JSON-style wrapper around the chat bot.

    Args:
        message: The user's message.
        history: Optional Gradio-style list of (user, assistant) pairs.
            Defaults to None (treated as an empty conversation); the
            None-default avoids the mutable-default-argument pitfall.

    Returns:
        A dict with keys "response" (the bot reply), "status" ("success"),
        and "message" (the echoed input).
    """
    if history is None:
        history = []

    # Same conversion as chat_function: flatten tuple pairs into role dicts.
    conversation_history = []
    for human_msg, assistant_msg in history:
        conversation_history.append({"role": "user", "content": human_msg})
        conversation_history.append({"role": "assistant", "content": assistant_msg})

    response = yah_bot.generate_response(message, conversation_history)

    return {
        "response": response,
        "status": "success",
        "message": message
    }
272
+
273
# Custom CSS for styling the Gradio app (silver / sky-blue theme carried
# over from the previous Streamlit version of this app).
custom_css = """
.gradio-container {
    background: linear-gradient(135deg, #f8f9fa 0%, #e3f2fd 100%);
    font-family: 'Inter', sans-serif;
}

.chatbot {
    border-radius: 16px;
    box-shadow: 0 4px 20px rgba(0,0,0,0.1);
}

.user {
    background: linear-gradient(135deg, #e3f2fd 0%, #bbdefb 100%) !important;
    border-left: 6px solid #2196f3 !important;
    border-radius: 16px !important;
    margin: 12px 0 !important;
}

.bot {
    background: linear-gradient(135deg, #ffffff 0%, #f5f5f5 100%) !important;
    border-left: 6px solid #78909c !important;
    border: 1px solid #e0e0e0 !important;
    border-radius: 16px !important;
    margin: 12px 0 !important;
}

.textbox {
    border: 2px solid #90caf9 !important;
    border-radius: 25px !important;
    padding: 15px 20px !important;
    font-size: 16px !important;
    background: white !important;
    box-shadow: 0 2px 10px rgba(33, 150, 243, 0.1) !important;
}

.button {
    background: linear-gradient(135deg, #2196f3 0%, #1976d2 100%) !important;
    color: white !important;
    border: none !important;
    border-radius: 25px !important;
    padding: 12px 28px !important;
    font-weight: 600 !important;
    font-size: 16px !important;
    box-shadow: 0 4px 12px rgba(33, 150, 243, 0.3) !important;
}

.button:hover {
    transform: translateY(-2px);
    box-shadow: 0 6px 16px rgba(33, 150, 243, 0.4) !important;
}

.header-gradient {
    background: linear-gradient(135deg, #2196f3 0%, #1976d2 100%);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    background-clip: text;
}
"""
332
+
333
# Create the Gradio interface: chat pane (left) plus an info/API sidebar
# (right).  Built at import time so `demo` exists for both `launch()` and
# Hugging Face Spaces' auto-discovery.
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    # Header banner with founder/company details pulled from the module
    # constants so the UI stays in sync with the bot's prompt context.
    gr.Markdown(
        f"""
        # <div class="header-gradient">YAH Tech Assistant</div>
        πŸš€ Powered by AI β€’ πŸ’Ό Venture Studio β€’ 🌍 Economic Innovation

        **πŸ‘€ Founder**: {PERSONAL_INFO['name']}
        **πŸ’Ό Role**: {PERSONAL_INFO['role']}
        **🏒 Company**: {COMPANY_INFO['name']} - {COMPANY_INFO['type']}

        *Ask me anything - general knowledge or about YAH Tech!*
        """
    )

    gr.Markdown("---")

    with gr.Row():
        # Main chat column.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                label="YAH Tech Chat",
                bubble_full_width=False,
                show_copy_button=True,
                avatar_images=(
                    "https://via.placeholder.com/40/2196f3/ffffff?text=U",
                    "https://via.placeholder.com/40/78909c/ffffff?text=Y"
                )
            )

            with gr.Row():
                msg = gr.Textbox(
                    placeholder="πŸ’¬ Ask me anything - general knowledge or about YAH Tech...",
                    container=False,
                    scale=4
                )
                clear_btn = gr.Button("πŸ—‘οΈ Clear", scale=1)

            gr.Markdown("""
            **πŸ’‘ Example questions:**
            - *"What's the capital of France?"* πŸ‡«πŸ‡·
            - *"Tell me about AI technology"* πŸ€–
            - *"How to start a business?"* πŸ’Ό
            - *"What is YAH Tech?"* 🏒
            - *"Who is Adedoyin James?"* πŸ‘€
            """)

        # Sidebar column: company profile and API usage notes.
        with gr.Column(scale=1):
            gr.Markdown("### 🏒 About YAH Tech")
            gr.Markdown(f"""
            **🎯 Focus**: {COMPANY_INFO['type']}
            **πŸ“ˆ Stage**: {COMPANY_INFO['stage']}

            **πŸ’‘ Philosophy**: *"{COMPANY_INFO['philosophy']}"*

            **🎯 Purpose**: {COMPANY_INFO['purpose']}

            **πŸ› οΈ Services**:
            β€’ App development
            β€’ Venture building
            β€’ Business system design
            β€’ Technology solutions
            β€’ Scalable platform development
            """)

            gr.Markdown("---")

            gr.Markdown("### πŸ”— API Usage")
            gr.Markdown("""
            ```python
            import requests

            response = requests.post(
                "YOUR_SPACE_URL/api/predict",
                json={
                    "data": [
                        "Your message here",
                        []
                    ]
                }
            )
            ```
            """)

    # Event handlers
    def respond(message, chat_history):
        # Append the (user, bot) pair to the chat and clear the textbox
        # (returning "" for the first output resets `msg`).
        bot_message = chat_function(message, chat_history)
        chat_history.append((message, bot_message))
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])
    # Clearing just resets the chatbot component; queue=False makes it instant.
    clear_btn.click(lambda: None, None, chatbot, queue=False)
424
+
425
+ # Initial welcome message
426
+ def add_welcome_message(chat_history):
427
  welcome_msg = f"""
428
  πŸ‘‹ **Hello! I'm YAH Bot, your AI assistant for everything related to {PERSONAL_INFO['name']} and {COMPANY_INFO['name']}!**
429
 
 
438
 
439
  **How can I assist you today?** πŸš€
440
  """
441
+ chat_history.append(("Hi", welcome_msg))
442
+ return chat_history
 
 
 
 
 
443
 
444
+ demo.load(add_welcome_message, [chatbot], [chatbot])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
445
 
446
+ # For API usage, Gradio automatically creates endpoints at:
447
+ # - /api/predict for the main chat function
448
+ # - /info for space information
449
  if __name__ == "__main__":
450
+ demo.launch(
451
+ share=True,
452
+ show_api=True,
453
+ server_name="0.0.0.
454
+ )