Adedoyinjames committed on
Commit
ecc1ef6
·
verified ·
1 Parent(s): 13f0fa8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -164
app.py CHANGED
@@ -3,147 +3,50 @@ from pydantic import BaseModel
3
  import uvicorn
4
  import torch
5
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
6
- from huggingface_hub import login
7
- import os
8
  import time
9
  from fastapi.middleware.cors import CORSMiddleware
10
 
11
- # Get Hugging Face token from environment variables
12
- HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN", "")
13
-
14
- if HUGGINGFACE_TOKEN:
15
- try:
16
- login(token=HUGGINGFACE_TOKEN)
17
- print("βœ… Successfully connected to Hugging Face")
18
- except Exception as e:
19
- print(f"❌ Hugging Face login failed: {e}")
20
-
21
- # Personal and Company Information
22
- PERSONAL_INFO = {
23
- "name": "Adedoyin Ifeoluwa James",
24
- "role": "Entrepreneur, Founder & CEO of YAH Tech",
25
- "goals": [
26
- "Build profitable systems that matter",
27
- "Reshaping the economical world",
28
- "Use software development to create flexibility and contribute to economic growth"
29
- ]
30
- }
31
-
32
- COMPANY_INFO = {
33
- "name": "YAH Tech",
34
- "type": "venture studio / app development company focused on solving problems with futuristic solutions",
35
- "purpose": "Build and launch technology-driven ventures that generate profit and societal value",
36
- "primary_activity": "App development and creation of scalable business systems",
37
- "philosophy": "Learn, understand, create, and evaluate",
38
- "stage": "Growth and development, focused on building sustainable processes and systems"
39
- }
40
 
41
- # Knowledge base for the AI
42
- KNOWLEDGE_BASE = {
43
- "personal": {
44
- "who are you": f"I am YAH Bot, the personal AI assistant for {PERSONAL_INFO['name']}. I help answer questions about him and his company YAH Tech.",
45
- "what are your goals": f"My purpose is to assist with information about {PERSONAL_INFO['name']} and YAH Tech. His goals include: {', '.join(PERSONAL_INFO['goals'])}",
46
- "what do you do": f"I provide information about {PERSONAL_INFO['name']} and his work at YAH Tech.",
47
- "tell me about yourself": f"I'm YAH Bot, an AI assistant created to help people learn about {PERSONAL_INFO['name']} and his technology ventures.",
48
- "background": f"{PERSONAL_INFO['name']} is {PERSONAL_INFO['role']} focused on building impactful technology systems.",
49
- "experience": f"As {PERSONAL_INFO['role']}, {PERSONAL_INFO['name']} has focused on building YAH Tech into a successful venture studio.",
50
- },
51
- "company": {
52
- "what is yah tech": f"{COMPANY_INFO['name']} is a {COMPANY_INFO['type']} that {COMPANY_INFO['purpose'].lower()}.",
53
- "what does yah tech do": f"We specialize in {COMPANY_INFO['primary_activity'].lower()}. {COMPANY_INFO['purpose']}.",
54
- "company philosophy": f"Our philosophy is: '{COMPANY_INFO['philosophy']}'",
55
- "company stage": f"We're currently in the {COMPANY_INFO['stage']} phase.",
56
- "venture studio": "As a venture studio, we build complete businesses with scalable systems and futuristic solutions.",
57
- "services": f"{COMPANY_INFO['name']} offers app development, venture building, business system design, technology solutions, and scalable platform development.",
58
- "mission": f"Our mission at {COMPANY_INFO['name']} is to {COMPANY_INFO['purpose'].lower()}.",
59
- }
60
- }
61
 
62
  class YAHBot:
63
  def __init__(self):
64
- # Changed: Now loading from YOUR private repo
65
- self.model_name = "Adedoyinjames/YAH-Tech-Chat-Bot"
66
  self.tokenizer = None
67
  self.model = None
68
  self._load_model()
69
 
70
  def _load_model(self):
71
- """Load the Hugging Face model from YOUR repo"""
72
  try:
73
- print(f"πŸ”„ Loading YAH Tech AI model from {self.model_name}...")
74
-
75
- # Use token from environment for private repo access
76
- token = os.environ.get("HUGGINGFACE_TOKEN", "")
77
-
78
- if token:
79
- # Load with authentication for private repo
80
- self.tokenizer = AutoTokenizer.from_pretrained(
81
- self.model_name,
82
- use_auth_token=token
83
- )
84
- self.model = AutoModelForSeq2SeqLM.from_pretrained(
85
- self.model_name,
86
- use_auth_token=token
87
- )
88
- else:
89
- # Fallback: try without auth (will work if repo becomes public)
90
- self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
91
- self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
92
-
93
- print("βœ… YAH Tech AI model loaded successfully from your repo!")
94
-
95
  except Exception as e:
96
- print(f"❌ Failed to load AI model from your repo: {e}")
97
- print("πŸ”„ Falling back to default model...")
98
- # Fallback to public model if your repo fails
99
- try:
100
- self.model_name = "google/flan-t5-base"
101
- self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
102
- self.model = AutoModelForSeq2SeqLM.from_pretrained(self.model_name)
103
- print("βœ… Fallback model loaded successfully!")
104
- except Exception as fallback_error:
105
- print(f"❌ Fallback model also failed: {fallback_error}")
106
- self.model = None
107
- self.tokenizer = None
108
-
109
- def _get_knowledge_response(self, user_input):
110
- """Check if the input matches any knowledge base entries"""
111
- user_input_lower = user_input.lower()
112
-
113
- # Check for specific brand-related queries
114
- if any(word in user_input_lower for word in ["adedoyin", "ifeoluwa", "james"]):
115
- return KNOWLEDGE_BASE["personal"]["who are you"]
116
-
117
- if any(word in user_input_lower for word in ["yah tech", "your company", "venture studio"]):
118
- return KNOWLEDGE_BASE["company"]["what is yah tech"]
119
-
120
- # Check knowledge base
121
- for category in KNOWLEDGE_BASE.values():
122
- for key, response in category.items():
123
- if key in user_input_lower:
124
- return response
125
-
126
- return None
127
-
128
- def _create_smart_prompt(self, user_input):
129
- """Create a much better prompt that separates brand info from general knowledge"""
130
- prompt = f"""You are a helpful AI assistant. Answer the following question accurately and helpfully.
131
- Question: {user_input}
132
- Answer: """
133
- return prompt
134
 
135
  def generate_response(self, user_input):
136
- """Generate response using knowledge base or AI model"""
137
- # First check knowledge base for brand-specific queries
138
- knowledge_response = self._get_knowledge_response(user_input)
139
- if knowledge_response:
140
- return knowledge_response
141
-
142
- # Use AI model for general knowledge questions
143
  if self.model and self.tokenizer:
144
  try:
145
- # Create clean prompt without brand bias for general questions
146
- prompt = self._create_smart_prompt(user_input)
147
 
148
  # Tokenize
149
  inputs = self.tokenizer(
@@ -163,7 +66,6 @@ Answer: """
163
  temperature=0.7,
164
  do_sample=True,
165
  pad_token_id=self.tokenizer.pad_token_id,
166
- repetition_penalty=1.2
167
  )
168
 
169
  response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
@@ -171,25 +73,9 @@ Answer: """
171
 
172
  except Exception as e:
173
  print(f"Model error: {str(e)}")
174
- return "I apologize, but I'm having trouble processing your question right now. Please try again."
175
 
176
- return "I'm currently unavailable. Please check back later."
177
-
178
- # Initialize FastAPI app
179
- app = FastAPI(
180
- title="YAH Tech AI API",
181
- description=f"AI Assistant API for {PERSONAL_INFO['name']} - Personal assistant and company information",
182
- version="1.0.0"
183
- )
184
-
185
- # Add CORS middleware
186
- app.add_middleware(
187
- CORSMiddleware,
188
- allow_origins=["*"],
189
- allow_credentials=True,
190
- allow_methods=["*"],
191
- allow_headers=["*"],
192
- )
193
 
194
  # Initialize the bot globally
195
  yah_bot = YAHBot()
@@ -197,7 +83,6 @@ yah_bot = YAHBot()
197
  # Request/Response models
198
  class ChatRequest(BaseModel):
199
  message: str
200
- user_id: str = None
201
 
202
  class ChatResponse(BaseModel):
203
  response: str
@@ -209,22 +94,16 @@ class HealthResponse(BaseModel):
209
  service: str
210
  timestamp: float
211
 
212
- class InfoResponse(BaseModel):
213
- personal_info: dict
214
- company_info: dict
215
- model: str
216
- status: str
217
-
218
  # API Endpoints
219
  @app.get("/")
220
  async def root():
221
  return {
222
- "message": f"YAH Tech AI API is running - Assistant for {PERSONAL_INFO['name']}",
223
  "status": "active",
 
224
  "endpoints": {
225
  "chat": "POST /api/chat",
226
- "health": "GET /api/health",
227
- "info": "GET /api/info"
228
  }
229
  }
230
 
@@ -252,18 +131,6 @@ async def health_check():
252
  timestamp=time.time()
253
  )
254
 
255
- @app.get("/api/info", response_model=InfoResponse)
256
- async def get_info():
257
- """
258
- Get information about the AI assistant and company
259
- """
260
- return InfoResponse(
261
- personal_info=PERSONAL_INFO,
262
- company_info=COMPANY_INFO,
263
- model=yah_bot.model_name,
264
- status="active"
265
- )
266
-
267
  # For Hugging Face Spaces
268
  def get_app():
269
  return app
 
3
  import uvicorn
4
  import torch
5
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 
6
  import time
7
  from fastapi.middleware.cors import CORSMiddleware
8
 
9
+ # Initialize FastAPI app
10
+ app = FastAPI(
11
+ title="YAH Tech AI API",
12
+ description="AI Assistant API for testing",
13
+ version="1.0.0"
14
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
+ # Add CORS middleware
17
+ app.add_middleware(
18
+ CORSMiddleware,
19
+ allow_origins=["*"],
20
+ allow_credentials=True,
21
+ allow_methods=["*"],
22
+ allow_headers=["*"],
23
+ )
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  class YAHBot:
26
  def __init__(self):
27
+ # βœ… Changed to load from your HF repo instead of direct model name
28
+ self.repo_id = "Adedoyinjames/brain-ai" # Your HF repo
29
  self.tokenizer = None
30
  self.model = None
31
  self._load_model()
32
 
33
  def _load_model(self):
34
+ """Load the model from your Hugging Face repo"""
35
  try:
36
+ print(f"πŸ”„ Loading AI model from {self.repo_id}...")
37
+ self.tokenizer = AutoTokenizer.from_pretrained(self.repo_id)
38
+ self.model = AutoModelForSeq2SeqLM.from_pretrained(self.repo_id)
39
+ print("βœ… AI model loaded successfully from HF repo!")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  except Exception as e:
41
+ print(f"❌ Failed to load AI model from repo: {e}")
42
+ self.model = None
43
+ self.tokenizer = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  def generate_response(self, user_input):
46
+ """Generate response using AI model"""
 
 
 
 
 
 
47
  if self.model and self.tokenizer:
48
  try:
49
+ prompt = f"Question: {user_input}\nAnswer: "
 
50
 
51
  # Tokenize
52
  inputs = self.tokenizer(
 
66
  temperature=0.7,
67
  do_sample=True,
68
  pad_token_id=self.tokenizer.pad_token_id,
 
69
  )
70
 
71
  response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
 
73
 
74
  except Exception as e:
75
  print(f"Model error: {str(e)}")
76
+ return "I apologize, but I'm having trouble processing your question right now."
77
 
78
+ return "AI model is not available."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
 
80
  # Initialize the bot globally
81
  yah_bot = YAHBot()
 
83
  # Request/Response models
84
  class ChatRequest(BaseModel):
85
  message: str
 
86
 
87
  class ChatResponse(BaseModel):
88
  response: str
 
94
  service: str
95
  timestamp: float
96
 
 
 
 
 
 
 
97
  # API Endpoints
98
  @app.get("/")
99
  async def root():
100
  return {
101
+ "message": "YAH Tech AI API is running",
102
  "status": "active",
103
+ "model_repo": yah_bot.repo_id, # Show which repo is being used
104
  "endpoints": {
105
  "chat": "POST /api/chat",
106
+ "health": "GET /api/health"
 
107
  }
108
  }
109
 
 
131
  timestamp=time.time()
132
  )
133
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  # For Hugging Face Spaces
135
  def get_app():
136
  return app