improve sync
Browse files
app_modules/llm_inference.py
CHANGED
|
@@ -38,7 +38,8 @@ class LLMInference(metaclass=abc.ABCMeta):
|
|
| 38 |
self, inputs, streaming_handler, q: Queue = None, tracing: bool = False
|
| 39 |
):
|
| 40 |
print(inputs)
|
| 41 |
-
self.llm_loader.
|
|
|
|
| 42 |
|
| 43 |
try:
|
| 44 |
self.llm_loader.streamer.reset(q)
|
|
@@ -67,7 +68,8 @@ class LLMInference(metaclass=abc.ABCMeta):
|
|
| 67 |
|
| 68 |
return result
|
| 69 |
finally:
|
| 70 |
-
self.llm_loader.
|
|
|
|
| 71 |
|
| 72 |
# Runs the chain synchronously with the streaming handler attached as a
# callback, and publishes the chain's result onto the queue so the caller
# (presumably a consumer on another thread, given the Queue handoff —
# TODO confirm against caller) can retrieve it.
def _execute_chain(self, chain, inputs, q, sh):
|
| 73 |
q.put(chain(inputs, callbacks=[sh]))
|
|
|
|
| 38 |
self, inputs, streaming_handler, q: Queue = None, tracing: bool = False
|
| 39 |
):
|
| 40 |
print(inputs)
|
| 41 |
+
if self.llm_loader.streamer.for_huggingface:
|
| 42 |
+
self.llm_loader.lock.acquire()
|
| 43 |
|
| 44 |
try:
|
| 45 |
self.llm_loader.streamer.reset(q)
|
|
|
|
| 68 |
|
| 69 |
return result
|
| 70 |
finally:
|
| 71 |
+
if self.llm_loader.streamer.for_huggingface:
|
| 72 |
+
self.llm_loader.lock.release()
|
| 73 |
|
| 74 |
# Invokes the chain with `sh` registered as its callback handler and puts
# the returned result onto `q`. NOTE(review): the Queue-based handoff
# suggests this runs on a worker thread while another thread drains the
# queue — verify against the enclosing class's streaming call site.
def _execute_chain(self, chain, inputs, q, sh):
|
| 75 |
q.put(chain(inputs, callbacks=[sh]))
|