added access_token
app.py
CHANGED
@@ -2,7 +2,9 @@ from fastapi import FastAPI, UploadFile, File
 from transformers import pipeline
 from fastai.vision.all import *
 from PIL import Image
+import os
 
+access_token = os.getenv("access_token")
 # NOTE - we configure docs_url to serve the interactive Docs at the root path
 # of the app. This way, we can use the docs as a landing page for the app on Spaces.
 app = FastAPI(docs_url="/")
@@ -58,10 +60,10 @@ llama_pipeline = pipeline(
     model=llama_model_id,
     model_kwargs={"torch_dtype": torch.bfloat16},
     device_map="auto",
+    auth_token=access_token
 )
 
 
-"""
 @app.post("/frame-details")
 def frame_details(text: str):
 
@@ -99,8 +101,3 @@ def frame_details(text: str):
 
     return extracted_info
 
-if __name__ == "__main__":
-    import uvicorn
-    uvicorn.run(app, host="0.0.0.0", port=8000)
-
-"""
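For context, a minimal sketch of what this commit does: it reads a Hugging Face Hub token from a Space secret (exposed to the app as the environment variable access_token) and passes it when building the Llama pipeline so the gated model can be downloaded; it also un-comments the /frame-details endpoint and drops the in-file uvicorn runner. The task name and model id below are placeholders, since the Space's actual llama_model_id value is not visible in this diff. Note that recent transformers releases accept the Hub token via the token= argument (older releases used use_auth_token=); auth_token= as written in the commit may not be recognized, depending on the installed version.

# Sketch only, not the Space's full app.py.
import os

import torch
from transformers import pipeline

# Space secrets are set under Settings -> Variables and secrets and surface
# as environment variables with the same name.
access_token = os.getenv("access_token")

llama_pipeline = pipeline(
    "text-generation",                      # assumed task; not shown in the diff
    model="meta-llama/Llama-2-7b-chat-hf",  # placeholder for the Space's llama_model_id
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
    token=access_token,  # current transformers argument name; the commit passes auth_token=
)

Removing the if __name__ == "__main__" uvicorn block is consistent with how FastAPI Spaces are usually run: the Space's own entrypoint (for example a Dockerfile CMD invoking uvicorn) starts the server, so an in-file runner is not needed.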