Spaces: Running on Zero
Update app.py
app.py CHANGED

```diff
@@ -25,7 +25,7 @@ from transformers.image_utils import load_image
 
 # Constants for text generation
 MAX_MAX_NEW_TOKENS = 2048
-DEFAULT_MAX_NEW_TOKENS =
+DEFAULT_MAX_NEW_TOKENS = 1280
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -291,7 +291,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
 
     model_choice = gr.Radio(
-        choices=["
+        choices=["docscopeOCR-7B-050425-exp", "MonkeyOCR-Recognition", "coreOCR-7B-050325-preview", "GLM-4.1V-9B-Thinking"],
         label="Select Model",
         value="docscopeOCR-7B-050425-exp"
     )
```
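For context, here is a minimal sketch of how the updated constants and the expanded `choices` list might be wired into the Gradio UI. Only the constant names, the model list, and the `model_choice`/`markdown_output` components come from the diff; the slider, the `run_ocr` stub, and the click wiring are assumptions standing in for the Space's actual generation code.

```python
# Minimal sketch, not the Space's actual app.py: shows how the constants and the
# expanded Radio choices from this commit could feed the UI.
import os

import gradio as gr

MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1280  # new default introduced by this commit
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))  # presumably caps the tokenized prompt length

MODEL_CHOICES = [
    "docscopeOCR-7B-050425-exp",
    "MonkeyOCR-Recognition",
    "coreOCR-7B-050325-preview",
    "GLM-4.1V-9B-Thinking",
]


def run_ocr(model_name: str, max_new_tokens: int) -> str:
    # Hypothetical stand-in for the Space's generation code: the real app would
    # look up the loaded model/processor for `model_name` and call generate()
    # with max_new_tokens (bounded above by MAX_MAX_NEW_TOKENS).
    return f"Selected {model_name}, max_new_tokens={max_new_tokens}"


with gr.Blocks() as demo:
    model_choice = gr.Radio(
        choices=MODEL_CHOICES,
        label="Select Model",
        value="docscopeOCR-7B-050425-exp",
    )
    max_new_tokens = gr.Slider(
        minimum=1,
        maximum=MAX_MAX_NEW_TOKENS,
        value=DEFAULT_MAX_NEW_TOKENS,
        step=1,
        label="Max new tokens",
    )
    markdown_output = gr.Markdown(label="Formatted Result (Result.Md)")
    run_button = gr.Button("Run")
    run_button.click(run_ocr, inputs=[model_choice, max_new_tokens], outputs=markdown_output)

if __name__ == "__main__":
    demo.launch()
```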