chore(repo): add model artifacts tracked by Git LFS; add scripts

- enable model/** via .gitignore override
- ensure Git LFS patterns for *.safetensors and other ML artifacts
- add testing scripts for HF Space and curl
Files changed:
- .gitignore +0 -13
- scripts/test_hf_dots_ocr_curl.sh +56 -0
- scripts/test_hf_dots_ocr_space.py +215 -0
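
The commit message says it "ensure[s] Git LFS patterns for *.safetensors and other ML artifacts", yet no .gitattributes change appears in the file list, so the patterns presumably already exist or were added in a separate commit. As a rough sketch, the usual way to register them is git lfs track; the exact pattern set below is an assumption mirroring the extensions removed from .gitignore in the next hunk.

# Hypothetical sketch of registering the LFS patterns this commit relies on.
# The pattern list is an assumption; adjust it to the repo's actual .gitattributes.
git lfs install
git lfs track "*.safetensors" "*.ckpt" "*.pth" "*.pt"
git add .gitattributes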
.gitignore
CHANGED
@@ -128,19 +128,6 @@ dmypy.json
 # Pyre type checker
 .pyre/
 
-# ML/AI specific
-# Model files and checkpoints
-*.pth
-*.pt
-*.pkl
-*.pickle
-*.h5
-*.hdf5
-*.joblib
-*.model
-*.ckpt
-*.safetensors
-
 # Data files
 *.csv
 *.json
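
Because this hunk stops ignoring model weight files so they can be committed under model/**, it is worth double-checking that such files are picked up by the LFS filter rather than stored as regular Git blobs. A quick check along these lines uses standard git and git-lfs commands; the weights path is a hypothetical example.

# model/weights.safetensors is a hypothetical path used only for illustration.
git check-ignore -v model/weights.safetensors || echo "not ignored (good)"
git check-attr filter model/weights.safetensors   # expect: filter: lfs
git lfs ls-files                                  # files actually tracked through LFS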
scripts/test_hf_dots_ocr_curl.sh
ADDED
@@ -0,0 +1,56 @@
#!/bin/bash
# Test script for Hugging Face Dots-OCR Space API using curl

HF_SPACE_URL="https://algoryn-dots-ocr-idcard.hf.space"

echo "🚀 Testing Hugging Face Dots-OCR Space API"
echo "=========================================="

# Test 1: Health check
echo ""
echo "1️⃣ Testing health check..."
curl -s "${HF_SPACE_URL}/health" | jq '.' 2>/dev/null || curl -s "${HF_SPACE_URL}/health"

# Test 2: OCR extraction (if demo image exists)
DEMO_IMAGE="data/demo/tom_id_card_front.jpg"
if [ -f "$DEMO_IMAGE" ]; then
    echo ""
    echo "2️⃣ Testing OCR extraction with $DEMO_IMAGE..."
    curl -X POST "${HF_SPACE_URL}/v1/id/ocr" \
        -F "file=@$DEMO_IMAGE" | jq '.' 2>/dev/null || echo "Response received (install jq for pretty JSON)"

    # Test 3: OCR with ROI (top half of image)
    echo ""
    echo "3️⃣ Testing OCR with ROI (top half)..."
    curl -X POST "${HF_SPACE_URL}/v1/id/ocr" \
        -F "file=@$DEMO_IMAGE" \
        -F 'roi={"x1": 0.0, "y1": 0.0, "x2": 1.0, "y2": 0.5}' | jq '.' 2>/dev/null || echo "Response received (install jq for pretty JSON)"

    # Test 4: OCR with ROI (center region)
    echo ""
    echo "4️⃣ Testing OCR with ROI (center region)..."
    curl -X POST "${HF_SPACE_URL}/v1/id/ocr" \
        -F "file=@$DEMO_IMAGE" \
        -F 'roi={"x1": 0.25, "y1": 0.25, "x2": 0.75, "y2": 0.75}' | jq '.' 2>/dev/null || echo "Response received (install jq for pretty JSON)"
else
    echo ""
    echo "2️⃣ No demo image found at $DEMO_IMAGE"
    echo "💡 Place a test image in the data/demo/ directory"
fi

echo ""
echo "🎉 Testing complete!"
echo ""
echo "💡 To test with your own files:"
echo "   curl -X POST \"${HF_SPACE_URL}/v1/id/ocr\" -F \"file=@your_image.jpg\""
echo ""
echo "💡 To test with ROI:"
echo "   curl -X POST \"${HF_SPACE_URL}/v1/id/ocr\" -F \"file=@your_image.jpg\" -F 'roi={\"x1\": 0.0, \"y1\": 0.0, \"x2\": 0.5, \"y2\": 0.5}'"
echo ""
echo "💡 Available ROI examples:"
echo "   Full image: no roi parameter"
echo "   Top half: {\"x1\": 0.0, \"y1\": 0.0, \"x2\": 1.0, \"y2\": 0.5}"
echo "   Bottom half: {\"x1\": 0.0, \"y1\": 0.5, \"x2\": 1.0, \"y2\": 1.0}"
echo "   Center: {\"x1\": 0.25, \"y1\": 0.25, \"x2\": 0.75, \"y2\": 0.75}"
echo "   Left side: {\"x1\": 0.0, \"y1\": 0.0, \"x2\": 0.5, \"y2\": 1.0}"
echo "   Right side: {\"x1\": 0.5, \"y1\": 0.0, \"x2\": 1.0, \"y2\": 1.0}"
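
The curl tests above only pretty-print the raw JSON. Going by the response shape the Python client in the next file expects (a detections list whose extracted_fields entries carry value and confidence), a small jq filter can reduce a response to just the recognized fields. Treat the field layout as an assumption inferred from that script, not a documented API contract.

# Hypothetical post-processing sketch: print non-empty fields with their confidence.
curl -s -X POST "https://algoryn-dots-ocr-idcard.hf.space/v1/id/ocr" -F "file=@your_image.jpg" \
  | jq -r '.detections[]?.extracted_fields | to_entries[]
           | select(.value.value != null)
           | "\(.key): \(.value.value) (confidence \(.value.confidence // 0))"'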
scripts/test_hf_dots_ocr_space.py
ADDED
@@ -0,0 +1,215 @@
#!/usr/bin/env python3
"""
Test script for Hugging Face Dots-OCR Space API

This script demonstrates how to interact with your deployed Dots-OCR Space
at https://algoryn-dots-ocr-idcard.hf.space
"""

import requests
import json
import time
from pathlib import Path
from typing import Optional, Dict, Any, List


class HFDotsOCRClient:
    """Client for interacting with the Hugging Face Dots-OCR Space API."""

    def __init__(self, base_url: str = "https://algoryn-dots-ocr-idcard.hf.space"):
        """Initialize the client with the Space URL."""
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()
        # Reasonable timeout for HF Spaces; requests only honors timeouts per request,
        # so it is stored here and passed to each call below.
        self.timeout = 60

    def health_check(self) -> Dict[str, Any]:
        """Check if the Space is healthy and running."""
        try:
            response = self.session.get(f"{self.base_url}/health", timeout=self.timeout)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            return {"error": f"Health check failed: {e}"}

    def extract_text(
        self,
        image_path: str,
        roi: Optional[Dict[str, float]] = None
    ) -> Dict[str, Any]:
        """Extract text from an identity document image.

        Args:
            image_path: Path to the image file
            roi: Optional region of interest as dict with x1, y1, x2, y2 (0-1 normalized)
        """
        try:
            with open(image_path, 'rb') as f:
                files = {'file': f}
                data = {}

                # Add ROI if provided
                if roi:
                    data['roi'] = json.dumps(roi)

                response = self.session.post(
                    f"{self.base_url}/v1/id/ocr",
                    files=files,
                    data=data,
                    timeout=self.timeout
                )
                response.raise_for_status()
                return response.json()

        except requests.exceptions.RequestException as e:
            return {"error": f"OCR extraction failed: {e}"}
        except FileNotFoundError:
            return {"error": f"Image file not found: {image_path}"}


def print_ocr_results(result: Dict[str, Any]) -> None:
    """Pretty print OCR extraction results."""
    if "error" in result:
        print(f"❌ Error: {result['error']}")
        return

    print(f"✅ Request ID: {result.get('request_id', 'N/A')}")
    print(f"📄 Media Type: {result.get('media_type', 'N/A')}")
    print(f"⏱️ Processing Time: {result.get('processing_time', 0):.2f}s")

    detections = result.get('detections', [])
    print(f"🔍 OCR Detections: {len(detections)}")

    for i, detection in enumerate(detections, 1):
        print(f"\n📋 Detection {i}:")

        # Print MRZ data if available
        mrz_data = detection.get('mrz_data')
        if mrz_data:
            print(f"  🆔 MRZ Data:")
            print(f"    Format: {mrz_data.get('format_type', 'N/A')}")
            print(f"    Valid: {mrz_data.get('is_valid', False)}")
            print(f"    Confidence: {mrz_data.get('confidence', 0):.3f}")
            if mrz_data.get('raw_text'):
                print(f"    Raw Text: {mrz_data['raw_text'][:50]}...")
        else:
            print(f"  🆔 MRZ Data: None detected")

        # Print extracted fields
        extracted_fields = detection.get('extracted_fields', {})
        print(f"  📝 Extracted Fields:")

        # Define field categories for better organization
        field_categories = {
            "Document Info": [
                "document_number", "document_type", "issuing_country", "issuing_authority"
            ],
            "Personal Info": [
                "surname", "given_names", "nationality", "date_of_birth",
                "gender", "place_of_birth"
            ],
            "Validity Info": [
                "date_of_issue", "date_of_expiry", "personal_number"
            ],
            "Additional": [
                "optional_data_1", "optional_data_2"
            ]
        }

        for category, fields in field_categories.items():
            category_fields = []
            for field_name in fields:
                field_data = extracted_fields.get(field_name)
                if field_data and field_data.get('value'):
                    category_fields.append(f"{field_name}: {field_data['value']} ({field_data.get('confidence', 0):.2f})")

            if category_fields:
                print(f"    {category}:")
                for field in category_fields:
                    print(f"      • {field}")


def test_with_roi(client: HFDotsOCRClient, image_path: str) -> None:
    """Test OCR with different ROI regions."""
    print(f"\n🎯 Testing with different ROI regions...")

    # Define different ROI regions to test
    roi_regions = {
        "Full Image": None,
        "Top Half": {"x1": 0.0, "y1": 0.0, "x2": 1.0, "y2": 0.5},
        "Bottom Half": {"x1": 0.0, "y1": 0.5, "x2": 1.0, "y2": 1.0},
        "Center Region": {"x1": 0.25, "y1": 0.25, "x2": 0.75, "y2": 0.75},
        "Left Side": {"x1": 0.0, "y1": 0.0, "x2": 0.5, "y2": 1.0},
        "Right Side": {"x1": 0.5, "y1": 0.0, "x2": 1.0, "y2": 1.0}
    }

    for region_name, roi in roi_regions.items():
        print(f"\n🔍 Testing {region_name}...")
        result = client.extract_text(image_path, roi)

        if "error" not in result:
            print(f"  ✅ Success - Processing time: {result.get('processing_time', 0):.2f}s")
            # Show a summary of extracted fields
            detections = result.get('detections', [])
            if detections:
                fields = detections[0].get('extracted_fields', {})
                field_count = sum(1 for field in fields.values() if field and field.get('value'))
                print(f"  📊 Extracted {field_count} fields")
        else:
            print(f"  ❌ Error: {result['error']}")


def main():
    """Main test function."""
    print("🚀 Testing Hugging Face Dots-OCR Space API")
    print("=" * 50)

    # Initialize client
    client = HFDotsOCRClient()

    # Test 1: Health check
    print("\n1️⃣ Testing health check...")
    health = client.health_check()
    if "error" in health:
        print(f"❌ Health check failed: {health['error']}")
        print("💡 Make sure your Hugging Face Space is running and accessible")
        return
    else:
        print(f"✅ Space is healthy: {health}")

    # Test 2: OCR extraction (if demo images exist)
    demo_images = [
        "data/demo/tom_id_card_front.jpg",
        "data/demo/tom_id_card_back.jpg",
        "data/demo/ocr/0000095097_1_E-5858-MA Fahrzeugschein und -brief.png",
        "data/demo/ocr/container_inspection_report.png",
        "data/demo/ocr/handelsregister_b.png"
    ]

    test_image = None
    for image_path in demo_images:
        if Path(image_path).exists():
            test_image = image_path
            break

    if test_image:
        print(f"\n2️⃣ Testing OCR extraction with {test_image}...")
        result = client.extract_text(test_image)
        print_ocr_results(result)

        # Test 3: ROI testing
        if "error" not in result:
            test_with_roi(client, test_image)
    else:
        print("\n2️⃣ No demo images found for testing")
        print("💡 Place some test images in the data/demo/ directory")

    print("\n🎉 Testing complete!")
    print("\n💡 To test with your own files:")
    print("   python test_hf_dots_ocr_space.py")
    print("\n💡 To test with ROI:")
    print("   client = HFDotsOCRClient()")
    print("   result = client.extract_text('image.jpg', roi={'x1': 0.0, 'y1': 0.0, 'x2': 0.5, 'y2': 0.5})")


if __name__ == "__main__":
    main()
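
For completeness, both smoke tests can be run from the repo root roughly as follows; this assumes jq is available for the shell script and that requests (the only third-party import above) is installed for the Python one.

pip install requests                      # dependency of the Python test script
bash scripts/test_hf_dots_ocr_curl.sh     # curl-based checks (uses jq when present)
python scripts/test_hf_dots_ocr_space.py  # client-based checks with ROI sweeps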