Update funcs.py
funcs.py CHANGED

@@ -10,7 +10,7 @@ from PIL import Image
 import os
 from datetime import datetime
 from openai import OpenAI
-from ai71 import AI71
+# from ai71 import AI71
 
 if torch.cuda.is_available():
     model = model.to('cuda')
@@ -22,8 +22,8 @@ with open ('emotion_group_labels.txt') as file:
 embed_model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
 classifier = pipeline("zero-shot-classification", model ='facebook/bart-large-mnli')
 
-AI71_API_KEY = os.getenv('AI71_API_KEY')
-print(AI71_API_KEY)
+# AI71_API_KEY = os.getenv('AI71_API_KEY')
+# print(AI71_API_KEY)
 huggingface_token = os.getenv('hf_token')
 print("hf-token:",huggingface_token)
 
@@ -96,7 +96,6 @@ def get_doc_response_emotions(user_message, therapy_session_conversation):
     print(f"therapy_session_conversation: {therapy_session_conversation}")
     return '', therapy_session_conversation, emotions_msg
 
-from datetime import datetime
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
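Note: the commit only comments out the ai71 import and its API-key lookup; nothing in funcs.py replaces them. If the intent is to keep ai71 as an optional dependency rather than drop it entirely, a guarded import is one common way to do that. The sketch below is only an illustration of that pattern, not code from this repository; it reuses the AI71 and AI71_API_KEY names seen in funcs.py and assumes nothing else about the module.

import os

# Guarded import: treat the ai71 package as optional instead of required.
try:
    from ai71 import AI71
except ImportError:
    AI71 = None

# Read the key from the environment without printing it (the commit also removes
# the print(AI71_API_KEY) call, which would have leaked the key into the logs).
AI71_API_KEY = os.getenv("AI71_API_KEY")

if AI71 is None or not AI71_API_KEY:
    print("ai71 support disabled (package or AI71_API_KEY not available)")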