improve unit tests
Browse files
- .env.example +3 -0
- Makefile +1 -1
- test.py +3 -3
.env.example
CHANGED
|
@@ -67,6 +67,9 @@ LLAMACPP_DOWNLOAD_LINK=https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/reso
|
|
| 67 |
# CHROMADB_INDEX_PATH="./data/chromadb_1024_512/"
|
| 68 |
FAISS_INDEX_PATH="./data/faiss_1024_512/"
|
| 69 |
|
|
|
|
|
|
|
|
|
|
| 70 |
QUESTIONS_FILE_PATH="./data/questions.txt"
|
| 71 |
|
| 72 |
TOKENIZERS_PARALLELISM=true
|
|
|
|
| 67 |
# CHROMADB_INDEX_PATH="./data/chromadb_1024_512/"
|
| 68 |
FAISS_INDEX_PATH="./data/faiss_1024_512/"
|
| 69 |
|
| 70 |
+ CHAT_QUESTION="What's the capital city of Malaysia?"
|
| 71 |
+ QA_QUESTION="What's deep learning?"
|
| 72 |
+
|
| 73 |
QUESTIONS_FILE_PATH="./data/questions.txt"
|
| 74 |
|
| 75 |
TOKENIZERS_PARALLELISM=true
|
Makefile
CHANGED
|
@@ -10,7 +10,7 @@ else
|
|
| 10 |
endif
|
| 11 |
|
| 12 |
test:
|
| 13 |
- 	PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 python test.py
|
| 14 |
|
| 15 |
chat:
|
| 16 |
python test.py chat
|
|
|
|
| 10 |
endif
|
| 11 |
|
| 12 |
test:
|
| 13 |
+ 	PYTORCH_MPS_HIGH_WATERMARK_RATIO=0.0 python test.py $(TEST)
|
| 14 |
|
| 15 |
chat:
|
| 16 |
python test.py chat
|
test.py
CHANGED
|
@@ -14,7 +14,7 @@ from app_modules.utils import get_device_types, print_llm_response
|
|
| 14 |
|
| 15 |
|
| 16 |
class TestLLMLoader(unittest.TestCase):
|
| 17 |
- 	question = "What's the capital city of Malaysia?"  (line truncated in extraction; reconstructed from the CHAT_QUESTION value this commit adds to .env.example — verify against the original file)
|
| 18 |
|
| 19 |
def run_test_case(self, llm_model_type, query):
|
| 20 |
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
|
|
@@ -52,7 +52,7 @@ class TestLLMLoader(unittest.TestCase):
|
|
| 52 |
|
| 53 |
|
| 54 |
class TestChatChain(unittest.TestCase):
|
| 55 |
- 	question = "What's the capital city of Malaysia?"  (line truncated in extraction; reconstructed from the CHAT_QUESTION value this commit adds to .env.example — verify against the original file)
|
| 56 |
|
| 57 |
def run_test_case(self, llm_model_type, query):
|
| 58 |
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
|
|
@@ -97,7 +97,7 @@ class TestChatChain(unittest.TestCase):
|
|
| 97 |
|
| 98 |
class TestQAChain(unittest.TestCase):
|
| 99 |
qa_chain: any
|
| 100 |
- 	question = "What's deep learning?"  (line truncated in extraction; reconstructed from the QA_QUESTION value this commit adds to .env.example — verify against the original file)
|
| 101 |
|
| 102 |
def run_test_case(self, llm_model_type, query):
|
| 103 |
start = timer()
|
|
|
|
| 14 |
|
| 15 |
|
| 16 |
class TestLLMLoader(unittest.TestCase):
|
| 17 |
+ 	question = os.environ.get("CHAT_QUESTION")
|
| 18 |
|
| 19 |
def run_test_case(self, llm_model_type, query):
|
| 20 |
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
|
|
|
|
| 52 |
|
| 53 |
|
| 54 |
class TestChatChain(unittest.TestCase):
|
| 55 |
+ 	question = os.environ.get("CHAT_QUESTION")
|
| 56 |
|
| 57 |
def run_test_case(self, llm_model_type, query):
|
| 58 |
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
|
|
|
|
| 97 |
|
| 98 |
class TestQAChain(unittest.TestCase):
|
| 99 |
qa_chain: any
|
| 100 |
+ 	question = os.environ.get("QA_QUESTION")
|
| 101 |
|
| 102 |
def run_test_case(self, llm_model_type, query):
|
| 103 |
start = timer()
|