Update CXR_LLAVA_HF.py
CXR_LLAVA_HF.py +2 -1
CXR_LLAVA_HF.py
CHANGED
@@ -584,6 +584,7 @@ class CXRLLAVAModel(PreTrainedModel):
         ]
         response = self.generate_cxr_repsonse(chat=chat, image=image, temperature=temperature, top_p=top_p)
         return response
+
     def ask_question(self, question, image, temperature=0.2, top_p=0.8):
         chat = [
             {"role": "system",
@@ -596,7 +597,7 @@ class CXRLLAVAModel(PreTrainedModel):
 
     def generate_cxr_repsonse(self, chat, image, temperature=0.2, top_p=0.8):
         with torch.no_grad():
-            streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=
+            streamer = TextIteratorStreamer(self.tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=90)
 
             if np.array(image).max()>255:
                 raise Exception("16-bit image is not supported.")
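The functional change is pinning the streamer's timeout to 90 seconds, which bounds how long the consumer of the stream waits for the next decoded chunk: if generation stalls past the timeout, iterating the streamer raises queue.Empty instead of blocking forever. For context, here is a minimal sketch of the threaded streaming pattern that TextIteratorStreamer is designed for. It uses gpt2 as a stand-in model and illustrative generation settings (max_new_tokens, temperature, top_p values), not the actual CXR-LLaVA wiring, so treat those names and numbers as assumptions.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

# Stand-in model for illustration only; CXR-LLaVA wires its own tokenizer/model.
model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Findings of the chest X-ray:"
inputs = tokenizer(prompt, return_tensors="pt")

# timeout=90 bounds how long the loop below blocks waiting for the next chunk;
# a stalled generation surfaces as queue.Empty instead of a silent hang.
streamer = TextIteratorStreamer(
    tokenizer, skip_prompt=True, skip_special_tokens=True, timeout=90
)

# generate() runs in a background thread and pushes decoded text into the
# streamer; the main thread consumes chunks as they become available.
generation_kwargs = dict(
    **inputs,
    max_new_tokens=64,
    do_sample=True,
    temperature=0.2,
    top_p=0.8,
    pad_token_id=tokenizer.eos_token_id,
    streamer=streamer,
)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()

response = ""
for chunk in streamer:
    response += chunk
thread.join()
print(response)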