import pickle
import os

import numpy as np
import pandas as pd
from tensorflow import keras
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import List, Dict
from huggingface_hub import hf_hub_download

# Keep the Hugging Face cache inside the app's writable directory.
os.environ['HF_HOME'] = '/app/.cache'

app = FastAPI(title="Blood Demand Predictor")

HOSPITALS = [
    "Tikur Anbessa Specialized Hospital",
    "Yekatit 12 Hospital",
    "St. Paul's Hospital Millennium Medical College",
    "Amin General Hospital",
    "Bethzatha General Hospital",
    "St. Gabriel General Hospital",
    "Hayat Hospital",
    "Kadisco General Hospital",
    "Landmark General Hospital",
    "Myungsung Christian Medical Center"
]

BLOOD_GROUPS = ["A+", "A-", "B+", "B-", "AB+", "AB-", "O+", "O-"]

# Feature columns, in the order the scalers and the model expect them.
features = ['Temperature', 'Events', 'Weekday', 'Weekend',
            'DayOfYear', 'DayOfMonth', 'WeekOfYear', 'Year']
target = 'BloodDemand'

SEQUENCE_LENGTH = 14   # days of history fed to the LSTM per prediction
DAYS_TO_PREDICT = 7    # length of the forecast horizon

# Model artifacts, populated at startup by load_resources().
model = None
last_sequences = None
feature_scaler = None
target_scaler = None

# Load the model artifacts once at app startup (assumes the standard FastAPI startup
# hook is used to wire this in; without it the endpoints below would always return 503).
@app.on_event("startup")
async def load_resources():
    """Download the model, the saved input sequences, and the scalers from the Hub."""
    global model, last_sequences, feature_scaler, target_scaler
    try:
        model_path = hf_hub_download(
            repo_id="yohannes-07/lstm-blood-demand-predictor",
            filename="lstm_blood_demand_v1_0.keras",
            revision="main",
            force_download=True,
            cache_dir='/app/.cache'
        )
        model = keras.models.load_model(model_path)

        sequences_path = hf_hub_download(
            repo_id="yohannes-07/lstm-blood-demand-predictor",
            filename="last_sequences.pkl",
            revision="main",
            force_download=True,
            cache_dir='/app/.cache'
        )
        with open(sequences_path, 'rb') as f:
            last_sequences = pickle.load(f)

        feature_scaler_path = hf_hub_download(
            repo_id="yohannes-07/lstm-blood-demand-predictor",
            filename="feature_scaler.pkl",
            revision="main",
            force_download=True,
            cache_dir='/app/.cache'
        )
        with open(feature_scaler_path, 'rb') as f:
            feature_scaler = pickle.load(f)

        target_scaler_path = hf_hub_download(
            repo_id="yohannes-07/lstm-blood-demand-predictor",
            filename="target_scaler.pkl",
            revision="main",
            force_download=True,
            cache_dir='/app/.cache'
        )
        with open(target_scaler_path, 'rb') as f:
            target_scaler = pickle.load(f)
    except Exception as e:
        raise RuntimeError(f"Failed to load resources: {str(e)}") from e
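
# Note on the downloaded artifacts (inferred from how they are used below, not stated
# in the model repo): `last_sequences` is expected to be a dict keyed by
# "<hospital>_<blood group>" (e.g. "Hayat Hospital_O+"), each value a scaled array of
# shape (SEQUENCE_LENGTH, len(features)) = (14, 8) holding that pair's most recent
# history; `feature_scaler` and `target_scaler` are the fitted scaler objects used
# during training.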

class PredictionRequest(BaseModel):
    future_features: List[List[float]]
    hospitals: List[str]
    blood_groups: List[str]


class PredictionResponse(BaseModel):
    predictions: Dict[str, List[int]]
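
# Illustrative request body (the numbers are made up): `future_features` must contain
# one row per forecast day, DAYS_TO_PREDICT rows in total, with columns in the same
# order as `features` (Temperature, Events, Weekday, Weekend, DayOfYear, DayOfMonth,
# WeekOfYear, Year).
#
# {
#   "future_features": [
#     [23.5, 0, 2, 0, 152, 1, 22, 2024],
#     [24.0, 0, 3, 0, 153, 2, 22, 2024],
#     ...five more rows, one for each remaining day...
#   ],
#   "hospitals": ["Hayat Hospital", "Yekatit 12 Hospital"],
#   "blood_groups": ["O+", "A-"]
# }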

def predict_next_n_days(last_sequence: np.ndarray, future_features: List[List[float]]) -> List[int]:
    """Predict blood demand for the next N days, starting from the last observed sequence."""
    if feature_scaler is None or target_scaler is None:
        raise RuntimeError("Scalers not loaded")
    # Scale the caller-supplied future feature rows with the training-time scaler.
    feature_df = pd.DataFrame(np.array(future_features), columns=features)
    scaled_future_features = feature_scaler.transform(feature_df)
    predictions = []
    current_sequence = last_sequence.copy()
    for day in range(DAYS_TO_PREDICT):
        # Predict one day ahead from the current SEQUENCE_LENGTH-day window.
        input_seq = current_sequence.reshape(1, SEQUENCE_LENGTH, len(features))
        pred = model.predict(input_seq, verbose=0)[0][0]
        predictions.append(pred)
        # Slide the window: drop the oldest day and append the next day's features.
        new_day_features = scaled_future_features[day]
        current_sequence = np.vstack([current_sequence[1:], new_day_features])
    # Map the scaled outputs back to real demand counts.
    predictions = np.array(predictions).reshape(-1, 1)
    real_predictions = target_scaler.inverse_transform(predictions).flatten()
    return [int(round(x)) for x in real_predictions]
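
# Rough usage sketch for the function above (the dictionary key and the outputs are
# hypothetical; `last_sequences` holds the scaled 14-day history for each pair):
#
#   seq = last_sequences["Hayat Hospital_O+"]          # shape (SEQUENCE_LENGTH, len(features))
#   week = predict_next_n_days(seq, future_features)   # e.g. [12, 9, 11, 10, 13, 8, 9]
#
# Each iteration feeds a (1, 14, 8) window to the LSTM, then rolls the window forward
# by one day before the seven scaled outputs are inverse-transformed at the end.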

@app.get("/")
async def root():
    """Default endpoint that provides API information."""
    return {
        "message": "Blood Demand Prediction API",
        "endpoints": {
            "predict_all": {
                "method": "POST",
                "path": "/predict_all",
                "description": "Predict for multiple hospital-blood group pairs"
            },
            "predict_single": {
                "method": "POST",
                "path": "/predict_single",
                "description": "Predict for a single hospital-blood group pair"
            }
        }
    }

@app.post("/predict_all", response_model=PredictionResponse)
async def predict_all(request: PredictionRequest):
    """Predict blood demand for every requested hospital-blood group pair."""
    if any(obj is None for obj in (model, last_sequences, feature_scaler, target_scaler)):
        raise HTTPException(status_code=503, detail="Resources not loaded")
    results = {}
    for hospital in request.hospitals:
        for blood_group in request.blood_groups:
            hb_id = f"{hospital}_{blood_group}"
            if hb_id in last_sequences:
                try:
                    predictions = predict_next_n_days(
                        last_sequences[hb_id],
                        request.future_features
                    )
                    results[hb_id] = predictions
                except Exception:
                    # Skip pairs whose prediction fails; only successful pairs are returned.
                    continue
    if not results:
        raise HTTPException(
            status_code=404,
            detail="No valid predictions could be made for any hospital-blood group pair"
        )
    return {"predictions": results}

@app.post("/predict_single")
async def predict_single(hospital: str, blood_group: str, request: PredictionRequest):
    """Predict blood demand for a specific hospital-blood group pair."""
    if any(obj is None for obj in (model, last_sequences, feature_scaler, target_scaler)):
        raise HTTPException(status_code=503, detail="Resources not loaded")
    hb_id = f"{hospital}_{blood_group}"
    if hb_id not in last_sequences:
        raise HTTPException(
            status_code=404,
            detail=f"No historical data for {hospital} ({blood_group})"
        )
    try:
        predictions = predict_next_n_days(
            last_sequences[hb_id],
            request.future_features
        )
        return {
            "hospital": hospital,
            "blood_group": blood_group,
            "predictions": predictions
        }
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Prediction failed: {str(e)}"
        ) from e
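
# Minimal client sketch (assumes the app is served on port 7860, the Hugging Face
# Spaces default; adjust the base URL for your deployment). `hospital` and
# `blood_group` travel as query parameters, while the PredictionRequest fields go in
# the JSON body shaped like the example above the Pydantic models:
#
#   import requests
#
#   payload = {
#       "future_features": [[23.5, 0, 2, 0, 152, 1, 22, 2024]] * 7,  # placeholder rows
#       "hospitals": ["Hayat Hospital"],
#       "blood_groups": ["O+"],
#   }
#   r = requests.post(
#       "http://localhost:7860/predict_single",
#       params={"hospital": "Hayat Hospital", "blood_group": "O+"},
#       json=payload,
#   )
#   print(r.json())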