AnujithM committed
Commit 36b4088 · verified · 1 Parent(s): 9c0acde

Upload 3 files

Files changed (3)
  1. README.md +14 -13
  2. app.py +235 -0
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,14 +1,15 @@
- ---
- title: ClimaAI
- emoji: 🐨
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 5.49.1
- app_file: app.py
- pinned: false
- license: apache-2.0
- short_description: Climate reasoning demo using live weather + K2-Think model (
- ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # ClimaMind — K2-Think + Live Climate Data (Gradio on Hugging Face Spaces)
+
+ ## Setup
+ 1) Create a new Space → SDK = **Gradio**.
+ 2) Upload `app.py` and `requirements.txt` (this README is optional).
+ 3) In **Settings → Variables / secrets**, set:
+    - `PROVIDER` = `hf_model` (recommended), `local`, or `stub`
+    - `MODEL_ID` = `MBZUAI-IFM/K2-Think-SFT` (default) or `LLM360/K2-Think`
+    - `HF_TOKEN` = your HF token (Read + Inference)
+ 4) If you choose `local`, switch the Space hardware to **GPU**.
+
+ ## Notes
+ - Uses Open-Meteo + OpenAQ (keyless).
+ - If the model returns non-JSON, you’ll see a friendly fallback message.
+ - If rate-limited, temporarily set `PROVIDER=stub` for the demo (see the smoke-test sketch below).
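For a quick offline check of the provider wiring described above, the stub provider can be exercised without a Space or any network access. The sketch below is illustrative and not part of this commit; it assumes `app.py` is importable from the working directory, that the packages in `requirements.txt` are installed, and the coordinates and readings are made-up sample values.

```python
# Minimal local smoke test using the offline stub provider (illustrative sketch).
import os

os.environ["PROVIDER"] = "stub"   # must be set before app.py is imported

from app import reason_answer

result = reason_answer(
    "New Delhi, India",                       # location label (sample value)
    {"lat": 28.61, "lon": 77.21},             # coordinates (approximate)
    {"temp_c": 32, "rh": 50, "wind_kmh": 12,  # sample observations
     "precip_mm": 0.0, "uv": 7, "pm25": 80},
    "If I wash clothes now, when will they dry?",
)
print(result["answer"], "| risk:", result["risk_badge"])
```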
app.py ADDED
@@ -0,0 +1,235 @@
+ # app.py — ClimaMind on Hugging Face Spaces (Gradio)
+ import os, time, json, random
+ import requests
+ import gradio as gr
+
+ PROVIDER = os.getenv("PROVIDER", "hf_model").strip()
+ MODEL_ID = os.getenv("MODEL_ID", "MBZUAI-IFM/K2-Think-SFT").strip()
+ HF_TOKEN = os.getenv("HF_TOKEN", "").strip()
+
+ def _get(url, params=None, headers=None, timeout=12, retries=2, backoff=1.6):
+     for i in range(retries + 1):
+         try:
+             r = requests.get(url, params=params, headers=headers, timeout=timeout)
+             r.raise_for_status()
+             return r
+         except Exception:
+             if i == retries:
+                 raise
+             time.sleep((backoff ** i) + random.random() * 0.25)
+
+ def geocode_city(city: str):
+     r = _get("https://nominatim.openstreetmap.org/search",
+              params={"q": city, "format": "json", "limit": 1},
+              headers={"User-Agent": "climamind-space"})
+     j = r.json()
+     if not j:
+         raise RuntimeError("City not found")
+     return {"lat": float(j[0]["lat"]), "lon": float(j[0]["lon"]), "name": j[0]["display_name"]}
+
+ def fetch_open_meteo(lat, lon):
+     r = _get("https://api.open-meteo.com/v1/forecast", params={
+         "latitude": lat, "longitude": lon,
+         "current": "temperature_2m,relative_humidity_2m,wind_speed_10m,precipitation,uv_index",
+         "hourly": "temperature_2m,relative_humidity_2m,wind_speed_10m,precipitation_probability,uv_index",
+         "timezone": "auto"
+     })
+     return r.json()
+
+ def fetch_openaq_pm25(lat, lon):
+     # Best-effort PM2.5 lookup; return None instead of failing the whole request
+     # if OpenAQ is unreachable, rate-limited, or has no nearby station.
+     try:
+         r = _get("https://api.openaq.org/v3/latest",
+                  params={"coordinates": f"{lat},{lon}", "radius": 10000, "limit": 1, "parameter": "pm25"},
+                  headers={"User-Agent": "climamind-space"})
+         j = r.json()
+     except Exception:
+         return None
+     pm25 = None
+     if j.get("results"):
+         ms = j["results"][0].get("measurements", [])
+         for m in ms:
+             if m.get("parameter") == "pm25":
+                 pm25 = m.get("value")
+                 break
+     return pm25
+
+ def fetch_factors(lat, lon):
+     wx = fetch_open_meteo(lat, lon)
+     cur = wx.get("current", {})
+     factors = {
+         "temp_c": cur.get("temperature_2m"),
+         "rh": cur.get("relative_humidity_2m"),
+         "wind_kmh": cur.get("wind_speed_10m"),
+         "precip_mm": cur.get("precipitation"),
+         "uv": cur.get("uv_index"),
+         "pm25": fetch_openaq_pm25(lat, lon)
+     }
+     return {"factors": factors, "raw": wx}
+
+ def drying_index(temp_c, rh, wind_kmh, cloud_frac=None):
+     base = (temp_c or 0) * 1.2 + (wind_kmh or 0) * 0.8 - (rh or 0) * 0.9
+     if cloud_frac is not None:
+         base -= 20 * cloud_frac
+     return max(0, min(100, round(base)))
+
+ def heat_stress_index(temp_c, rh, wind_kmh):
+     hs = (temp_c or 0) * 1.1 + (rh or 0) * 0.3 - (wind_kmh or 0) * 0.2
+     return max(0, min(100, round(hs)))
+
+ PROMPT = """You are ClimaMind, a climate reasoning assistant. Use ONLY the observations provided and return STRICT JSON.
+
+ Location: {loc} (lat={lat}, lon={lon}), local time: {t_local}
+ Observations: temp={temp_c}°C, rh={rh}%, wind={wind_kmh} km/h, precip={precip_mm} mm, uv={uv}, pm25={pm25}
+ Derived: drying_index={d_idx}, heat_stress_index={hs_idx}
+
+ Task: Answer the user’s query: "{query}" for the next 24 hours.
+ Steps:
+ 1) Identify the relevant factors.
+ 2) Reason causally (2–3 steps).
+ 3) Give a concise recommendation with time window(s) and a confidence.
+ 4) Output a short WHY-TRACE (3 bullets).
+ Return JSON ONLY:
+ {{
+ "answer": "...",
+ "why_trace": ["...", "...", "..."],
+ "risk_badge": "Low"|"Moderate"|"High"
+ }}"""
+
+ def call_stub(_prompt: str) -> str:
+     return json.dumps({
+         "answer": "Based on 32°C, 50% RH and 12 km/h wind, cotton dries in ~2–3h (faster after 2pm).",
+         "why_trace": [
+             "Higher temperature and wind increase evaporation rate",
+             "Moderate humidity slightly slows drying",
+             "Lower afternoon cloud cover speeds it up"
+         ],
+         "risk_badge": "Low"
+     })
+
+ def call_hf_model(prompt: str) -> str:
+     from huggingface_hub import InferenceClient
+     client = InferenceClient(model=MODEL_ID, token=(HF_TOKEN or None))
+     out = client.text_generation(
+         prompt,
+         max_new_tokens=200,
+         temperature=0.1,
+         repetition_penalty=1.05,
+         do_sample=False,
+     )
+     return str(out)
+
+ _local_loaded = False
+ def _ensure_local_loaded():
+     # Optional local load — requires GPU Space
+     global _local_loaded, tokenizer, model
+     if _local_loaded:
+         return
+     from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+     import torch
+     bnb_cfg = BitsAndBytesConfig(
+         load_in_4bit=True,
+         bnb_4bit_compute_dtype=torch.bfloat16,
+         bnb_4bit_use_double_quant=True,
+         bnb_4bit_quant_type="nf4",
+     )
+     tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
+     model = AutoModelForCausalLM.from_pretrained(
+         MODEL_ID,
+         trust_remote_code=True,
+         device_map="auto",
+         quantization_config=bnb_cfg,
+         low_cpu_mem_usage=True,
+     )
+     _local_loaded = True
+
+ def call_local(prompt: str) -> str:
+     _ensure_local_loaded()
+     import torch
+     if hasattr(tokenizer, "apply_chat_template"):
+         messages = [{"role": "user", "content": prompt}]
+         # return_dict=True yields a mapping that can be unpacked into model.generate(**inputs);
+         # a bare tensor from apply_chat_template cannot be **-unpacked.
+         inputs = tokenizer.apply_chat_template(
+             messages, tokenize=True, add_generation_prompt=True,
+             return_dict=True, return_tensors="pt"
+         ).to(model.device)
+     else:
+         inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     with torch.no_grad():
+         out = model.generate(
+             **inputs,
+             max_new_tokens=200,
+             temperature=0.1,
+             do_sample=False,
+             repetition_penalty=1.05,
+             eos_token_id=tokenizer.eos_token_id,
+         )
+     return tokenizer.decode(out[0], skip_special_tokens=True)
+
+ def reason_answer(loc, coords, factors, query):
+     d_idx = drying_index(factors.get("temp_c"), factors.get("rh"), factors.get("wind_kmh"))
+     hs_idx = heat_stress_index(factors.get("temp_c"), factors.get("rh"), factors.get("wind_kmh"))
+     t_local = time.strftime("%Y-%m-%d %H:%M")
+     prompt = PROMPT.format(
+         loc=loc, lat=coords["lat"], lon=coords["lon"], t_local=t_local,
+         temp_c=factors.get("temp_c"), rh=factors.get("rh"), wind_kmh=factors.get("wind_kmh"),
+         precip_mm=factors.get("precip_mm"), uv=factors.get("uv"), pm25=factors.get("pm25"),
+         d_idx=d_idx, hs_idx=hs_idx, query=query
+     )
+
+     if PROVIDER == "hf_model":
+         raw = call_hf_model(prompt)
+     elif PROVIDER == "local":
+         raw = call_local(prompt)
+     else:
+         raw = call_stub(prompt)
+
+     start, end = raw.find("{"), raw.rfind("}")
+     if start == -1 or end == -1:
+         return {
+             "answer": "The reasoning service returned non-JSON text. Please try again.",
+             "why_trace": ["Response formatting issue", "Low temperature helps", "Retry the query"],
+             "risk_badge": "Low"
+         }
+     try:
+         return json.loads(raw[start:end+1])
+     except Exception:
+         return {
+             "answer": "Failed to parse JSON from model output.",
+             "why_trace": ["JSON parsing error", "Reduce tokens/temperature", "Retry once"],
+             "risk_badge": "Low"
+         }
+
+ def app(city, question):
+     geo = geocode_city(city)
+     data = fetch_factors(geo["lat"], geo["lon"])
+     ans = reason_answer(
+         geo["name"], {"lat": geo["lat"], "lon": geo["lon"]},
+         data["factors"], question
+     )
+     fx = ", ".join([f"{k}={v}" for k, v in data["factors"].items()])
+     why_list = ans.get("why_trace") or []
+     why = "\n• " + "\n• ".join(why_list) if why_list else "\n• (no trace returned)"
+     md = (
+         f"**Answer:** {ans.get('answer','(no answer)')}\n\n"
+         f"**Why-trace:**{why}\n\n"
+         f"**Risk:** {ans.get('risk_badge','N/A')}\n\n"
+         f"**Factors:** {fx}"
+     )
+     return md
+
+ demo = gr.Interface(
+     fn=app,
+     inputs=[
+         gr.Textbox(label="City", value="New Delhi"),
+         gr.Dropdown(
+             choices=[
+                 "If I wash clothes now, when will they dry?",
+                 "Should I water my plants today or wait?",
+                 "What is the heat/wildfire risk today? Explain briefly."
+             ],
+             label="Question",
+             value="If I wash clothes now, when will they dry?"
+         )
+     ],
+     outputs=gr.Markdown(label="ClimaMind"),
+     title="ClimaMind — K2-Think + Live Climate Data",
+     description="Provider = hf_model (Inference API) | local (GPU Space) | stub (offline). Configure env in Space settings.",
+     allow_flagging="never"
+ )
+ # concurrency_count was removed from queue() in Gradio 4; use default_concurrency_limit instead
+ demo.queue(default_concurrency_limit=2, max_size=8)
+
+ if __name__ == "__main__":
+     demo.launch()
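Two details of `app.py` are easy to sanity-check in isolation: the hand-tuned `drying_index` / `heat_stress_index` heuristics, and the `find("{")` / `rfind("}")` slicing that `reason_answer` uses to recover JSON from a chatty model reply. The snippet below is an editor's sketch, not part of the commit, and assumes `app.py` is importable with its dependencies installed.

```python
import json
from app import drying_index, heat_stress_index

# At 32 °C, 50 % RH, 12 km/h wind:
#   drying:      32*1.2 + 12*0.8 - 50*0.9 = 3.0  -> 3  (clamped to 0..100)
#   heat stress: 32*1.1 + 50*0.3 - 12*0.2 = 47.8 -> 48
print(drying_index(32, 50, 12), heat_stress_index(32, 50, 12))

# Same brace-slicing recovery as reason_answer: keep only the outermost {...} span.
raw = 'Sure, here you go: {"answer": "ok", "why_trace": [], "risk_badge": "Low"} Hope this helps!'
print(json.loads(raw[raw.find("{"): raw.rfind("}") + 1]))
```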
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio>=4.44.0
+ requests>=2.31.0
+ huggingface_hub>=0.23.0
+ transformers>=4.43.0
+ accelerate>=0.31.0
+ bitsandbytes>=0.43.0
+ sentencepiece>=0.2.0