Aleksandr Maiorov committed · bfcae7e · 1 Parent(s): 1f1b386
v 0.1
- added llama-index
app.py CHANGED
@@ -1,6 +1,7 @@
 import logging
 
 from fastapi import FastAPI
+from llama_index.core.memory import ChatMemoryBuffer
 from llama_index.llms.llama_cpp import LlamaCPP
 from transformers import AutoTokenizer
 from llama_index.core import set_global_tokenizer
@@ -72,8 +73,19 @@ llm = LlamaCPP(
     verbose=True,
 )
 
+memory = ChatMemoryBuffer.from_defaults(token_limit=3900)
 index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)
-
+chat_engine = index.as_chat_engine(
+    chat_mode="condense_plus_context",
+    memory=memory,
+    llm=llm,
+    context_prompt=(
+        "You are a chatbot, able to have normal interactions.\n"
+        "Here are the relevant documents for the context:\n"
+        "{context_str}"
+        "\nInstruction: Use the previous chat history, or the context above, to interact and help the user."
+    ),
+)
 
 def generate_response(completion_response):
     try:
@@ -101,7 +113,7 @@ async def predict(text: str):
     logger.info('REQUEST:')
     logger.info(text)
     # response = llm.complete(text)
-    response =
+    response = chat_engine.chat(text)
     logger.info('RESPONSE:')
     logger.info(response)
     # text_response = generate_response(response)