Upload 20 files
- .gitignore +60 -0
- DEPLOYMENT_GUIDE.md +341 -0
- Dockerfile +42 -0
- QUICKSTART.md +113 -0
- README.md +259 -10
- analyze_document.py +352 -0
- app.py +403 -0
- extract_drivers_license.py +353 -0
- extract_national_id.py +364 -0
- extract_nbi_ocr.py +200 -0
- extract_passport.py +459 -0
- extract_phic.py +405 -0
- extract_police_ocr.py +812 -0
- extract_postal.py +419 -0
- extract_prc.py +377 -0
- extract_sss.py +216 -0
- extract_tesda_ocr.py +331 -0
- extract_umid.py +317 -0
- requirements.txt +14 -0
- test_api.py +145 -0
.gitignore
ADDED
@@ -0,0 +1,60 @@
```
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
ENV/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# PaddleOCR
.paddleocr/
inference/
*.pdparams
*.pdopt
*.pdmodel

# Temporary files
temp_*.jpg
temp_*.png
*.tmp

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Logs
*.log
logs/

# Environment variables
.env
.env.local

# Output
output/
results/
```
DEPLOYMENT_GUIDE.md
ADDED
@@ -0,0 +1,341 @@
# Deployment Guide for Hugging Face Spaces

This guide will help you deploy the HandyHome OCR API to Hugging Face Spaces.

## Prerequisites

- A Hugging Face account (free at https://huggingface.co/join)
- Git installed on your machine (optional, for command-line deployment)

## Deployment Options

### Option 1: Web UI Deployment (Easiest)

#### Step 1: Create a New Space

1. Go to https://huggingface.co/new-space
2. Fill in the details:
   - **Owner**: Your username
   - **Space name**: `handyhome-ocr-api` (or any name you prefer)
   - **License**: MIT
   - **Select the Space SDK**: Choose **Docker**
   - **Space hardware**: Start with **CPU basic** (free tier)
   - **Visibility**: Choose Public or Private
3. Click **Create Space**

#### Step 2: Upload Files via Web UI

1. In your new Space, click the **Files** tab
2. Click **Add file** → **Upload files**
3. Upload the following files from the `huggingface-ocr` folder:

   ```
   app.py
   requirements.txt
   Dockerfile
   README.md
   .gitignore
   extract_national_id.py
   extract_drivers_license.py
   extract_prc.py
   extract_umid.py
   extract_sss.py
   extract_passport.py
   extract_postal.py
   extract_phic.py
   extract_nbi_ocr.py
   extract_police_ocr.py
   extract_tesda_ocr.py
   analyze_document.py
   ```

4. Click **Commit changes to main**

#### Step 3: Wait for Build

1. Go to the **App** tab
2. You'll see the build progress
3. The initial build takes **5-10 minutes** due to:
   - Installing PaddleOCR and dependencies
   - Downloading OCR models (~500MB)
   - Building the Docker container
4. Watch the build logs for any errors

#### Step 4: Verify Deployment

Once built, test your API:

```bash
# Check health
curl https://YOUR-USERNAME-handyhome-ocr-api.hf.space/health

# Expected response:
# {"status":"healthy","service":"handyhome-ocr-api","version":"1.0.0"}
```

### Option 2: Git Command Line Deployment

#### Step 1: Create Space on Web

Follow Step 1 from Option 1 above.

#### Step 2: Clone the Space Repository

```bash
# Install Git LFS (if not already installed)
git lfs install

# Clone your space
git clone https://huggingface.co/spaces/YOUR-USERNAME/handyhome-ocr-api
cd handyhome-ocr-api
```

#### Step 3: Copy Files

```bash
# Copy all files from the huggingface-ocr folder
cp -r ../huggingface-ocr/* .
```

#### Step 4: Commit and Push

```bash
# Add all files
git add .

# Commit
git commit -m "Initial deployment of HandyHome OCR API"

# Push to Hugging Face
git push
```

#### Step 5: Monitor Build

Go to your Space URL to watch the build progress.

## Configuration

### Space Settings

In your Space settings, you can configure:

1. **Hardware**:
   - **CPU basic** (free): 2 vCPU, 16GB RAM - suitable for testing
   - **CPU upgrade** (paid): Better performance
   - **GPU** (paid): Faster OCR processing

2. **Sleep time**:
   - Free tier: Sleeps after 48 hours of inactivity
   - Paid tier: Sleep can be disabled

3. **Secrets** (if needed):
   - Add environment variables in Settings → Repository secrets

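Secrets surface inside the container as ordinary environment variables. A minimal sketch of reading one from `app.py` (the name `OCR_API_KEY` is hypothetical, used only for illustration):

```python
import os

# Hypothetical secret configured under Settings -> Repository secrets.
api_key = os.environ.get("OCR_API_KEY")
if api_key is None:
    print("OCR_API_KEY not set; running without authentication")
```
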
### Custom Domain (Optional)

For production, you can set up a custom domain in the Space settings.

## Testing Your Deployment

### Test the Health Endpoint

```bash
curl https://YOUR-USERNAME-handyhome-ocr-api.hf.space/health
```

### Test OCR Extraction

```bash
# Test National ID extraction
curl -X POST https://YOUR-USERNAME-handyhome-ocr-api.hf.space/api/extract-national-id \
  -H "Content-Type: application/json" \
  -d '{"document_url": "YOUR_IMAGE_URL"}'
```

### Test in Python

```python
import requests

base_url = "https://YOUR-USERNAME-handyhome-ocr-api.hf.space"

# Test health
response = requests.get(f"{base_url}/health")
print(response.json())

# Test extraction
response = requests.post(
    f"{base_url}/api/extract-national-id",
    json={"document_url": "YOUR_IMAGE_URL"}
)
print(response.json())
```

## Integration with Your Main App

Update your main Flask app (`handyhome-web-scripts/app.py`) to use the Hugging Face Space:

```python
import requests
from flask import request, jsonify

HUGGINGFACE_OCR_API = "https://YOUR-USERNAME-handyhome-ocr-api.hf.space"

@app.route('/extract-document', methods=['POST'])
def extract_document():
    data = request.json
    image_url = data.get('image_url')
    document_type = data.get('document_type')

    # Map document types to HF Space endpoints
    endpoint_mapping = {
        'National ID': '/api/extract-national-id',
        "Driver's License": '/api/extract-drivers-license',
        'PRC ID': '/api/extract-prc',
        'UMID': '/api/extract-umid',
        'SSS ID': '/api/extract-sss',
        'Passport': '/api/extract-passport',
        'Postal ID': '/api/extract-postal',
        'PHIC': '/api/extract-phic',
        'NBI Clearance': '/api/extract-nbi',
        'Police Clearance': '/api/extract-police-clearance',
        'TESDA': '/api/extract-tesda'
    }

    endpoint = endpoint_mapping.get(document_type)
    if not endpoint:
        return jsonify({'error': 'Unsupported document type'}), 400

    # Call the Hugging Face Space API
    try:
        response = requests.post(
            f"{HUGGINGFACE_OCR_API}{endpoint}",
            json={'document_url': image_url},
            timeout=300
        )
        return jsonify(response.json())
    except Exception as e:
        return jsonify({'error': str(e)}), 500
```

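Note that the 300-second client timeout above matches the Gunicorn `--timeout 300` in the Dockerfile, so a request that is still waiting on a cold model load fails on both ends at about the same time rather than leaving an orphaned connection.
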
## Monitoring and Maintenance

### Check Space Status

1. Go to your Space URL
2. Click **Settings** → **Usage**
3. Monitor:
   - Request count
   - Error rate
   - Response times
   - Memory usage

### View Logs

1. In your Space, click the **App** tab
2. Scroll down to see real-time logs
3. Useful for debugging errors

### Update Deployment

To update your deployment:

**Web UI Method:**
1. Click the **Files** tab
2. Click the file to edit
3. Make changes
4. Click **Commit changes**

**Git Method:**
```bash
cd handyhome-ocr-api
# Make changes to files
git add .
git commit -m "Update description"
git push
```

## Troubleshooting

### Build Fails

**Error: Out of memory**
- Solution: Reduce the number of Gunicorn workers in the Dockerfile or upgrade the hardware

**Error: Timeout during build**
- Solution: This is normal for the first build. Wait, or restart the build.

**Error: Missing dependencies**
- Solution: Check requirements.txt and the Dockerfile

### Runtime Errors

**Error: Script not found**
- Solution: Ensure all `extract_*.py` files are uploaded

**Error: PaddleOCR model download fails**
- Solution: Models are fetched during the Docker build (and again at runtime if missing). Check the build logs and network connectivity.

**Error: 503 Service Unavailable**
- Solution: The Space is sleeping. Wake it up by accessing the URL.

### Performance Issues

**Slow response times**
- Upgrade to a better hardware tier
- Increase Gunicorn workers (may need more RAM)
- Consider caching frequently accessed documents (see the sketch below)

**Out of memory errors**
- Reduce Gunicorn workers in the Dockerfile
- Upgrade to a higher memory tier
- Process smaller images

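One lightweight form of the caching suggestion, sketched with `functools.lru_cache` keyed on the endpoint and document URL. It assumes an unchanged URL always points at the same image; the function name is illustrative:

```python
import json
from functools import lru_cache

import requests

HUGGINGFACE_OCR_API = "https://YOUR-USERNAME-handyhome-ocr-api.hf.space"

@lru_cache(maxsize=128)
def extract_cached(endpoint: str, document_url: str) -> str:
    """Run OCR once per (endpoint, URL) pair and reuse the raw JSON text."""
    response = requests.post(
        f"{HUGGINGFACE_OCR_API}{endpoint}",
        json={"document_url": document_url},
        timeout=300,
    )
    response.raise_for_status()
    return response.text  # cache the string; callers parse their own copy

result = json.loads(extract_cached("/api/extract-national-id", "https://example.com/id.jpg"))
```
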
## Cost Considerations

### Free Tier
- CPU basic hardware
- 48-hour sleep timeout
- Suitable for testing and low-traffic use

### Paid Tiers
- **CPU upgrade**: $0.03/hour (~$22/month)
- **GPU T4**: $0.60/hour (~$432/month)
- No sleep timeout
- Better performance

### Optimization Tips
- Use CPU for cost-effective deployment
- Enable the sleep timeout for development
- Only upgrade if you need 24/7 availability or high performance

## Security Best Practices

1. **Use Private Spaces** for sensitive data
2. **Add authentication** if needed (custom middleware; see the sketch after this list)
3. **Rate limiting** - Add to prevent abuse
4. **HTTPS only** - Hugging Face provides this by default
5. **Input validation** - Already implemented in the scripts
6. **Secrets management** - Use HF Space secrets for API keys

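For item 2, a minimal sketch of key-based middleware. It assumes a secret named `OCR_API_KEY` and a client header `X-API-Key`, both hypothetical choices rather than anything this repo defines:

```python
import os

from flask import Flask, jsonify, request

app = Flask(__name__)
API_KEY = os.environ.get("OCR_API_KEY")  # hypothetical secret name

@app.before_request
def require_api_key():
    # Leave the health check open so the platform can probe the container.
    if request.path == "/health" or API_KEY is None:
        return None
    if request.headers.get("X-API-Key") != API_KEY:
        return jsonify({"success": False, "error": "Invalid or missing API key"}), 401
    return None
```
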
## Support Resources

- **Hugging Face Spaces Docs**: https://huggingface.co/docs/hub/spaces
- **Docker SDK Guide**: https://huggingface.co/docs/hub/spaces-sdks-docker
- **Community Forum**: https://discuss.huggingface.co/

## Next Steps

After successful deployment:

1. ✅ Update your main app to use the HF Space API
2. ✅ Test all document types thoroughly
3. ✅ Set up monitoring and alerts
4. ✅ Document the API endpoints for your team
5. ✅ Consider setting up staging and production spaces

---

Happy deploying! 🚀
Dockerfile
ADDED
@@ -0,0 +1,42 @@
```dockerfile
# Dockerfile for HandyHome OCR API
FROM python:3.9-slim

# Install system dependencies for PaddleOCR and OpenCV
RUN apt-get update && apt-get install -y \
    libglib2.0-0 \
    libsm6 \
    libxext6 \
    libxrender-dev \
    libgomp1 \
    libgthread-2.0-0 \
    libgl1 \
    curl \
    wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Copy and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY . .

# Create non-root user for security
RUN useradd --create-home --shell /bin/bash app && chown -R app:app /app
USER app

# Download PaddleOCR models during build to speed up the first run.
# This runs after USER app so the models land in /home/app/.paddleocr,
# where PaddleOCR looks for them when the container runs as that user.
RUN python -c "from paddleocr import PaddleOCR; ocr = PaddleOCR(use_angle_cls=False, lang='en', show_log=False)"

EXPOSE 7860

# Health check
HEALTHCHECK --interval=30s --timeout=30s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:7860/health || exit 1

# Use Gunicorn for production
CMD ["gunicorn", "--bind", "0.0.0.0:7860", "--workers", "2", "--timeout", "300", "--max-requests", "1000", "--max-requests-jitter", "100", "app:app"]
```
QUICKSTART.md
ADDED
@@ -0,0 +1,113 @@
# Quick Start Guide

Get your OCR API running on Hugging Face Spaces in minutes!

## 🚀 Deploy in 3 Steps

### Step 1: Create Space (2 minutes)
1. Go to https://huggingface.co/new-space
2. Name: `handyhome-ocr-api`
3. SDK: **Docker**
4. Click "Create Space"

### Step 2: Upload Files (3 minutes)
1. Click "Files" tab → "Add file" → "Upload files"
2. Upload all files from the `huggingface-ocr` folder
3. Click "Commit changes to main"

### Step 3: Wait for Build (5-10 minutes)
- Go to the "App" tab
- Watch the build logs
- When done, you'll see: "Running on http://0.0.0.0:7860"

## ✅ Test Your API

```bash
# Check health
curl https://YOUR-USERNAME-handyhome-ocr-api.hf.space/health

# Test extraction
curl -X POST https://YOUR-USERNAME-handyhome-ocr-api.hf.space/api/extract-national-id \
  -H "Content-Type: application/json" \
  -d '{"document_url": "YOUR_IMAGE_URL"}'
```

## 📚 Available Endpoints

### Philippine IDs
- `/api/extract-national-id` - National ID
- `/api/extract-drivers-license` - Driver's License
- `/api/extract-prc` - PRC ID
- `/api/extract-umid` - UMID
- `/api/extract-sss` - SSS ID
- `/api/extract-passport` - Passport
- `/api/extract-postal` - Postal ID
- `/api/extract-phic` - PhilHealth ID

### Clearances
- `/api/extract-nbi` - NBI Clearance
- `/api/extract-police-clearance` - Police Clearance
- `/api/extract-tesda` - TESDA Certificate

### Utility
- `/api/analyze-document` - Identify document type
- `/health` - Health check
- `/` - Full API documentation

## 🔧 Integration Example

```python
import requests

# Your Hugging Face Space URL
API_BASE = "https://YOUR-USERNAME-handyhome-ocr-api.hf.space"

def extract_national_id(image_url):
    response = requests.post(
        f"{API_BASE}/api/extract-national-id",
        json={"document_url": image_url},
        timeout=300
    )
    return response.json()

# Use it
result = extract_national_id("https://example.com/id.jpg")
print(result)
```

## 💡 Tips

- **First request is slow**: PaddleOCR loads models on first use (~30 seconds)
- **Image quality matters**: Use clear, well-lit photos
- **Timeout**: Set the timeout to 5 minutes for the first request
- **Free tier**: The Space sleeps after 48 hours of inactivity

## 🐛 Troubleshooting

**Build fails?**
- Wait and retry - the first build can time out
- Check the build logs for specific errors

**503 Error?**
- The Space is sleeping - just visit the URL to wake it, or retry in code (see the sketch after this section)

**Slow responses?**
- Normal for the first request (loading models)
- Subsequent requests are faster (2-5 seconds)

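A client can absorb both the 503s and the slow first request with a simple retry loop; a sketch using plain `requests` (the retry count and backoff are arbitrary):

```python
import time

import requests

def post_with_wake(url, payload, attempts=3):
    """Retry on 503 so a sleeping Space has time to wake up."""
    for attempt in range(attempts):
        response = requests.post(url, json=payload, timeout=300)
        if response.status_code != 503:
            response.raise_for_status()
            return response.json()
        time.sleep(30 * (attempt + 1))  # arbitrary linear backoff
    raise RuntimeError(f"Space still unavailable after {attempts} attempts")
```
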
## 📖 Full Documentation

- See `README.md` for complete API documentation
- See `DEPLOYMENT_GUIDE.md` for detailed deployment steps

## 🎯 Next Steps

1. Deploy your OCR Space
2. Update your main app to use it
3. Test with real documents
4. Monitor usage in the Space settings

---

Need help? Check `DEPLOYMENT_GUIDE.md` for detailed instructions!
README.md
CHANGED
@@ -1,10 +1,259 @@

The previous ten lines (an empty front-matter block with blank `title:`, `emoji:`, `colorFrom:`, and `colorTo:` fields, plus `sdk: docker`, `pinned: false`, and three trailing blank lines) were removed and replaced with the populated README below.
---
title: HandyHome OCR API
emoji: 🔍
colorFrom: blue
colorTo: green
sdk: docker
pinned: false
license: mit
---

# HandyHome OCR Extraction API

Philippine ID and Document OCR Extraction Service using PaddleOCR

## 🎯 Features

### Supported Documents

#### Philippine Government IDs
- **National ID** - 19-digit ID number, full name, birth date
- **Driver's License** - License number, full name, address, birth date
- **UMID** - CRN, full name, birth date
- **SSS ID** - SSS number, full name, birth date
- **PRC ID** - PRC number, profession, full name, validity
- **Postal ID** - PRN, full name, address, birth date
- **PhilHealth ID** - ID number, full name, birth date, sex, address

#### Clearances & Certificates
- **NBI Clearance** - ID number, full name, birth date
- **Police Clearance** - ID number, full name, address, birth date, status
- **TESDA Certificate** - Registry number, full name, qualification, date issued

#### Passport
- **Philippine Passport** - Passport number, surname, given names, birth date, nationality

### Additional Features
- **Document Analysis** - Automatic document type identification

## 🚀 Quick Start

### API Endpoints

All extraction endpoints accept POST requests with the following format:

```json
{
  "document_url": "https://example.com/document.jpg"
}
```

#### Philippine ID Endpoints
- `POST /api/extract-national-id` - Extract National ID
- `POST /api/extract-drivers-license` - Extract Driver's License
- `POST /api/extract-prc` - Extract PRC ID
- `POST /api/extract-umid` - Extract UMID
- `POST /api/extract-sss` - Extract SSS ID
- `POST /api/extract-passport` - Extract Passport
- `POST /api/extract-postal` - Extract Postal ID
- `POST /api/extract-phic` - Extract PhilHealth ID

#### Clearance Endpoints
- `POST /api/extract-nbi` - Extract NBI Clearance
- `POST /api/extract-police-clearance` - Extract Police Clearance
- `POST /api/extract-tesda` - Extract TESDA Certificate

#### Analysis Endpoint
- `POST /api/analyze-document` - Identify document type

#### Utility Endpoints
- `GET /health` - Health check
- `GET /` - API documentation
- `GET /api/routes` - List all routes

## 📝 Usage Examples

### Python Example

```python
import requests

# Extract National ID
response = requests.post(
    'https://YOUR-SPACE.hf.space/api/extract-national-id',
    json={'document_url': 'https://example.com/national_id.jpg'}
)

result = response.json()
print(result)

# Expected output:
# {
#   "success": true,
#   "id_number": "1234-5678-9012-3456",
#   "full_name": "Juan Dela Cruz",
#   "birth_date": "1990-01-15"
# }
```

### cURL Example

```bash
curl -X POST https://YOUR-SPACE.hf.space/api/extract-national-id \
  -H "Content-Type: application/json" \
  -d '{"document_url": "https://example.com/national_id.jpg"}'
```

### JavaScript Example

```javascript
const response = await fetch('https://YOUR-SPACE.hf.space/api/extract-national-id', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    document_url: 'https://example.com/national_id.jpg'
  })
});

const result = await response.json();
console.log(result);
```

## 🛠️ Technical Details

### Technology Stack
- **OCR Engine**: PaddleOCR 2.7+
- **Framework**: Flask + Gunicorn
- **Image Processing**: OpenCV, Pillow
- **Runtime**: Python 3.9

### Performance
- Average response time: 2-5 seconds per document
- Supports images up to 10MB
- Concurrent request handling with Gunicorn workers

### Resource Requirements
- RAM: 4GB minimum
- Storage: 2GB (includes PaddleOCR models)
- CPU: 2 cores recommended

## 📦 Deployment to Hugging Face Spaces

### Step 1: Create a New Space

1. Go to [Hugging Face Spaces](https://huggingface.co/new-space)
2. Enter a space name: `handyhome-ocr-api` (or your preferred name)
3. Select **Docker** as the SDK
4. Choose visibility: Public or Private
5. Click "Create Space"

### Step 2: Upload Files

Upload all files from this directory to your Space:
- `app.py`
- `requirements.txt`
- `Dockerfile`
- `README.md`
- All `extract_*.py` scripts
- `analyze_document.py`

### Step 3: Configure Space Settings

1. In your Space settings, set:
   - **SDK**: Docker
   - **Port**: 7860
   - **Sleep time**: 48 hours (optional)
2. The Space will automatically build and deploy

### Step 4: Wait for Build

- The initial build takes 5-10 minutes
- PaddleOCR models are downloaded during the build
- Check the build logs for any errors

### Step 5: Test Your API

Once deployed, test the health endpoint:
```bash
curl https://YOUR-USERNAME-handyhome-ocr-api.hf.space/health
```

## 🔧 Local Development

### Setup

```bash
# Install dependencies
pip install -r requirements.txt

# Run the Flask development server
python app.py
```

### Testing

```bash
# Test with a document URL
curl -X POST http://localhost:7860/api/extract-national-id \
  -H "Content-Type: application/json" \
  -d '{"document_url": "YOUR_IMAGE_URL"}'
```

## 📊 Response Format

### Successful Response

```json
{
  "success": true,
  "id_number": "1234-5678-9012-3456",
  "full_name": "Juan Dela Cruz",
  "birth_date": "1990-01-15",
  ...additional fields...
}
```

### Error Response

```json
{
  "success": false,
  "error": "Error description",
  "stderr": "Detailed error message"
}
```

## ⚠️ Limitations

- Requires clear, readable document images
- Works best with well-lit, high-resolution scans
- OCR accuracy depends on image quality
- Some fields may be null if not detected
- Processing time varies based on image size

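Since fields can come back null, client code should check before trusting them; a minimal sketch building on the Python example above (where `result` is the parsed response):

```python
# Treat missing or null fields as "needs manual review" rather than trusting blanks.
required = ["id_number", "full_name", "birth_date"]
missing = [field for field in required if not result.get(field)]
if missing:
    print(f"Manual review needed; missing fields: {missing}")
```
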
## 🔐 Security Considerations

- Images are processed in memory and not stored permanently
- All processing happens server-side
- Sensitive data should be transmitted over HTTPS
- Consider rate limiting for production use

## 📄 License

MIT License - see the LICENSE file for details

## 🤝 Contributing

Contributions welcome! Please submit issues and pull requests.

## 📞 Support

For issues and questions:
- Open an issue on GitHub
- Contact: [Your contact information]

---

Built with ❤️ using PaddleOCR and Flask
analyze_document.py
ADDED
@@ -0,0 +1,352 @@
```python
#!/usr/bin/env python3
"""
Document Authenticity Analysis Script

Purpose:
    Performs forensic analysis on document images to detect tampering and verify authenticity.
    Uses Error Level Analysis (ELA) and metadata examination techniques.

Why this script exists:
    - Need to verify document authenticity in verification workflows
    - Detect potential image tampering or editing
    - Provide forensic evidence for document verification
    - Support fraud prevention in document processing systems

Key Features:
    - Error Level Analysis (ELA) for tampering detection
    - EXIF metadata analysis for editing software detection
    - Brightness ratio analysis for tampering assessment
    - Support for single document analysis

Dependencies:
    - Pillow (PIL): Image processing (https://pillow.readthedocs.io/)
    - exifread: EXIF metadata extraction (https://pypi.org/project/ExifRead/)
    - numpy: Numerical operations (https://numpy.org/doc/)
    - requests: HTTP library (https://docs.python-requests.org/)

Usage:
    python analyze_document.py "https://example.com/document.jpg"

Output:
    JSON with tampering and metadata analysis results
"""

import json
import os
import sys
import uuid
from io import BytesIO

import exifread
import numpy as np
import requests
from PIL import Image, ImageChops, ImageEnhance

# List of photo editing software to detect in metadata.
# This helps identify if an image has been edited.
software_list = [
    'photoshop', 'lightroom', 'gimp', 'paint', 'paint.net',
    'paintshop pro', 'paintshop pro x', 'paintshop pro x2', 'paintshop pro x3',
    'paintshop pro x4', 'paintshop pro x5', 'paintshop pro x6', 'paintshop pro x7',
    'paintshop pro x8', 'paintshop pro x9', 'paintshop pro x10',
]

def download_image(url, output_path='temp_image.jpg'):
    """
    Download image from URL and process it for analysis.

    Args:
        url (str): Image URL
        output_path (str): Local save path

    Returns:
        str: Path to processed image or None if failed

    Why this approach:
        - Handles JSON-wrapped URLs (common in web applications)
        - Converts RGBA to RGB for JPEG compatibility
        - Provides detailed error handling and logging
        - Uses high quality to preserve analysis accuracy
    """
    print(f"DOCUMENT Starting download for URL: {url}", file=sys.stderr)

    # Handle a URL that might be a JSON string
    if isinstance(url, str) and (url.startswith('{') or url.startswith('[')):
        try:
            url_data = json.loads(url)
            print(f"DOCUMENT Parsed JSON URL data: {url_data}", file=sys.stderr)
            if isinstance(url_data, dict) and 'url' in url_data:
                url = url_data['url']
                print(f"DOCUMENT Extracted URL from dict: {url}", file=sys.stderr)
            elif isinstance(url_data, list) and len(url_data) > 0:
                url = url_data[0] if isinstance(url_data[0], str) else url_data[0].get('url', '')
                print(f"DOCUMENT Extracted URL from list: {url}", file=sys.stderr)
        except json.JSONDecodeError as e:
            print(f"DOCUMENT Error parsing URL JSON: {url}, Error: {str(e)}", file=sys.stderr)
            return None

    if not url:
        print("DOCUMENT Empty URL after processing", file=sys.stderr)
        return None

    try:
        response = requests.get(url, timeout=60)  # bounded so a dead host cannot hang the script
        response.raise_for_status()
        image_data = response.content

        print(f"DOCUMENT Downloaded image from {url}", file=sys.stderr)
        print(f"DOCUMENT Image data size: {len(image_data)} bytes", file=sys.stderr)

        # Process the image
        image = Image.open(BytesIO(image_data))
        print(f"DOCUMENT Original image format: {image.format}", file=sys.stderr)
        print(f"DOCUMENT Original image mode: {image.mode}", file=sys.stderr)

        # Convert to RGB if necessary (JPEG doesn't support an alpha channel)
        if image.mode == 'RGBA':
            background = Image.new('RGB', image.size, (255, 255, 255))
            background.paste(image, mask=image.split()[-1])
            image = background
            print("DOCUMENT Converted RGBA to RGB", file=sys.stderr)
        elif image.mode != 'RGB':
            original_mode = image.mode
            image = image.convert('RGB')
            print(f"DOCUMENT Converted {original_mode} to RGB", file=sys.stderr)

        # Save as JPG without trying to preserve EXIF
        image.save(output_path, 'JPEG', quality=95)
        print(f"DOCUMENT Saved image to {output_path}", file=sys.stderr)

        return output_path

    except Exception as e:
        print(f"DOCUMENT Error processing image: {str(e)}", file=sys.stderr)
        return None

# Tampering detection
def perform_error_level_analysis(image_path, output_path="ELA.png", quality=90):
    """
    Perform Error Level Analysis (ELA) on an image.

    Args:
        image_path (str): Path to original image
        output_path (str): Path to save ELA result
        quality (int): JPEG quality for recompression

    Returns:
        str: Path to ELA result image

    Why ELA:
        - Detects areas of an image that have been edited or tampered with
        - Works by comparing the original image with a recompressed version
        - Edited areas show different compression artifacts
        - Standard forensic technique for image authenticity
    """
    print("DOCUMENT TAMPERING Starting Error Level Analysis...", file=sys.stderr)

    try:
        original_image = Image.open(image_path)

        # Convert RGBA to RGB if necessary (JPEG doesn't support an alpha channel)
        if original_image.mode == 'RGBA':
            # Create a white background
            background = Image.new('RGB', original_image.size, (255, 255, 255))
            # Paste the image onto the background, using the alpha channel as mask
            background.paste(original_image, mask=original_image.split()[-1])
            original_image = background
        elif original_image.mode != 'RGB':
            original_image = original_image.convert('RGB')

        # Use a unique temp file name to avoid conflicts
        temp_file = f"temp_ela_{str(uuid.uuid4())[:8]}.jpg"
        original_image.save(temp_file, "JPEG", quality=quality)
        resaved_image = Image.open(temp_file)

        difference = ImageChops.difference(original_image, resaved_image)
        difference = ImageEnhance.Brightness(difference).enhance(10)

        # Save the difference image
        difference.save(output_path, format='PNG')

        # Clean up the temp file
        try:
            os.remove(temp_file)
        except OSError:
            pass

        print(f"DOCUMENT TAMPERING ELA analysis completed, saved to {output_path}", file=sys.stderr)
        return output_path

    except Exception as e:
        print(f"DOCUMENT TAMPERING Error during ELA: {str(e)}", file=sys.stderr)
        return None

def detect_tampering(ela_path, threshold=100):
    """
    Analyze ELA results to detect tampering.

    Args:
        ela_path (str): Path to ELA result image
        threshold (int): Brightness threshold for tampering detection

    Returns:
        dict: Tampering analysis results

    Why this approach:
        - Analyzes the brightness distribution in the ELA image
        - High brightness indicates areas of potential tampering
        - Uses statistical thresholds for tampering assessment
        - Provides confidence levels for tampering detection
    """
    print("DOCUMENT TAMPERING Analyzing ELA results...", file=sys.stderr)

    ela_image = Image.open(ela_path)
    ela_array = np.array(ela_image)

    bright_pixels = np.sum(ela_array > threshold)
    total_pixels = ela_array.size

    brightness_ratio = bright_pixels / total_pixels

    print(f"DOCUMENT TAMPERING Brightness ratio: {brightness_ratio:.4f}", file=sys.stderr)

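    # Editorial note on the cut-offs below, which the source leaves unexplained:
    # under 2% bright pixels the recompression residue is roughly uniform
    # (consistent with an untouched JPEG), while above 5% large regions differ
    # from the resave, a common signature of local edits such as splicing.
    # Both values are heuristic thresholds, not calibrated constants.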
    if brightness_ratio < 0.02:
        return {
            "tampered": "False",
            "brightness_ratio": round(float(brightness_ratio), 4)
        }
    elif brightness_ratio <= 0.05:
        return {
            "tampered": "Investigation Required",
            "brightness_ratio": round(float(brightness_ratio), 4)
        }
    else:
        return {
            "tampered": "True",
            "brightness_ratio": round(float(brightness_ratio), 4)
        }

# Metadata analysis (TO BE TESTED IN MOBILE)
def analyze_metadata(image_path):
    """
    Analyze EXIF metadata for editing software and authenticity indicators.

    Args:
        image_path (str): Path to image file

    Returns:
        dict: Metadata analysis results

    Why this approach:
        - EXIF metadata contains information about image creation and editing
        - Detects photo editing software used
        - Identifies camera information and timestamps
        - Provides forensic evidence for image authenticity
    """
    print("DOCUMENT METADATA Starting metadata analysis...", file=sys.stderr)

    try:
        with open(image_path, 'rb') as f:
            tags = exifread.process_file(f)
            metadata = {tag: str(tags.get(tag)) for tag in tags.keys()}

        # Debug: print what we found
        print(f"DOCUMENT METADATA Found {len(metadata)} metadata tags", file=sys.stderr)
        if metadata:
            print(f"DOCUMENT METADATA Metadata keys: {list(metadata.keys())}", file=sys.stderr)

    except Exception as e:
        print(f"DOCUMENT METADATA Error reading metadata: {str(e)}", file=sys.stderr)
        return {
            "result": "error",
            "message": f"Error reading metadata: {str(e)}",
            "metadata": {}
        }

    if not metadata:
        print(f"DOCUMENT METADATA No metadata found in {image_path}", file=sys.stderr)
        return {
            "result": "no metadata",
            "message": "No metadata found in image.",
            "metadata": metadata
        }

    # Check for timestamp metadata
    has_timestamp = any(key in metadata for key in ['EXIF DateTimeOriginal', 'Image DateTime', 'EXIF DateTime'])

    # Check for editing software
    software_used = metadata.get('Image Software', '').lower()

    # Check for camera information
    has_camera_info = any(key in metadata for key in ['Image Make', 'Image Model', 'EXIF Make', 'EXIF Model'])

    # Provide a more detailed analysis
    analysis_summary = []
    if has_timestamp:
        analysis_summary.append("Timestamp found")
    if has_camera_info:
        analysis_summary.append("Camera information found")
    if software_used:
        analysis_summary.append(f"Software detected: {software_used}")

    if not has_timestamp and not has_camera_info and not software_used:
        return {
            "result": "minimal metadata",
            "message": "Image contains minimal metadata (no timestamp, camera info, or editing software detected)",
            "metadata": metadata,
            "analysis": "Image appears to be legitimate with no signs of editing"
        }

    if any(term in software_used for term in software_list):
        return {
            "result": "edited",
            "message": f"Image appears to have been edited with software: {software_used}",
            "metadata": metadata,
            "analysis": "Suspicious editing software detected"
        }

    return {
        "result": "success",
        "message": f"Successfully analyzed metadata: {', '.join(analysis_summary)}",
        "metadata": metadata,
        "analysis": "Image appears to be legitimate with no signs of editing"
    }

# Main execution
if __name__ == "__main__":
    # Validate command line arguments
    if len(sys.argv) < 2:
        print(json.dumps({"error": "No image URL provided"}))
        sys.exit(1)

    image_url = sys.argv[1]

    try:
        # Download and process the image
        image_path = download_image(image_url)
        if not image_path:
            raise Exception("Failed to download image")

        # Perform metadata analysis first (before any file gets deleted)
        metadata_results = analyze_metadata(image_path)

        # Perform ELA and tampering detection
        ela_path = perform_error_level_analysis(image_path)
        if not ela_path:
            raise Exception("Failed to perform error level analysis")

        tampering_results = detect_tampering(ela_path)

        # Clean up files after all analysis is done
        try:
            if ela_path:
                os.remove(ela_path)
            if image_path:
                os.remove(image_path)
        except OSError:
            pass

        # Return combined results
        print(json.dumps({
            "success": True,
            "tampering_results": tampering_results,
            "metadata_results": metadata_results
        }))

    except Exception as e:
        print(json.dumps({
            "success": False,
            "error": str(e),
            "context": {
                "url": image_url
            }
        }))
        sys.exit(1)
```
app.py
ADDED
@@ -0,0 +1,403 @@
```python
from flask import Flask, request, jsonify
import sys
import os
import subprocess
import json

# Suppress PaddleOCR verbose logging and force headless (no display) operation
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

def run_extraction_script(script_name, document_url):
    """Generic function to run OCR extraction scripts"""
    try:
        cmd = [sys.executable, script_name, document_url]
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=300,
            cwd=os.getcwd()
        )

        if result.returncode != 0:
            return {
                'success': False,
                'error': f'Script failed with return code {result.returncode}',
                'stderr': result.stderr,
                'stdout': result.stdout
            }

        # Parse JSON output
        try:
            output_str = result.stdout.strip()

            # Try a direct parse first
            try:
                return json.loads(output_str)
            except json.JSONDecodeError:
                # Find the last JSON object in the output
                lines = output_str.split('\n')
                json_lines = [line.strip() for line in lines if line.strip().startswith('{')]

                if json_lines:
                    return json.loads(json_lines[-1])

                # Fall back to extracting the last {...} span from the output
                start_idx = output_str.rfind('{')
                end_idx = output_str.rfind('}')
                if start_idx != -1 and end_idx != -1 and end_idx >= start_idx:
                    return json.loads(output_str[start_idx:end_idx + 1])

                raise ValueError("No valid JSON found in output")

        except Exception as e:
            return {
                'success': False,
                'error': 'Invalid JSON output from script',
                'raw_output': result.stdout[:500],  # Limit output size
                'json_error': str(e)
            }

    except subprocess.TimeoutExpired:
        return {
            'success': False,
            'error': 'Script execution timed out after 5 minutes'
        }
    except Exception as e:
        return {
            'success': False,
            'error': f'Unexpected error: {str(e)}'
        }

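# Running each extractor in its own subprocess (sys.executable above) keeps
# PaddleOCR isolated from the web process: a crash or memory spike inside one
# OCR job cannot take down the Flask worker. The trade-off is a fresh
# interpreter, and a fresh model load, for every request.
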
# Create Flask app
app = Flask(__name__)

# Configure Flask for production
app.config['JSON_SORT_KEYS'] = False
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False

# ============================================================================
# PHILIPPINE ID OCR EXTRACTION ENDPOINTS
# ============================================================================

@app.route('/api/extract-national-id', methods=['POST'])
def api_extract_national_id():
    """Extract Philippine National ID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_national_id.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-drivers-license', methods=['POST'])
def api_extract_drivers_license():
    """Extract Philippine Driver's License details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_drivers_license.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-prc', methods=['POST'])
def api_extract_prc():
    """Extract PRC ID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_prc.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-umid', methods=['POST'])
def api_extract_umid():
    """Extract UMID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_umid.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-sss', methods=['POST'])
def api_extract_sss():
    """Extract SSS ID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_sss.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-passport', methods=['POST'])
def api_extract_passport():
    """Extract Philippine Passport details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_passport.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-postal', methods=['POST'])
def api_extract_postal():
    """Extract Postal ID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_postal.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

@app.route('/api/extract-phic', methods=['POST'])
def api_extract_phic():
    """Extract PhilHealth ID details"""
    try:
        data = request.json
        document_url = data.get('document_url')

        if not document_url:
            return jsonify({'error': 'Missing document_url'}), 400

        result = run_extraction_script('extract_phic.py', document_url)
        return jsonify(result)

    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500

# ============================================================================
```
|
| 214 |
+
# CLEARANCE & CERTIFICATE OCR EXTRACTION ENDPOINTS
|
| 215 |
+
# ============================================================================
|
| 216 |
+
|
| 217 |
+
@app.route('/api/extract-nbi', methods=['POST'])
|
| 218 |
+
def api_extract_nbi():
|
| 219 |
+
"""Extract NBI Clearance details"""
|
| 220 |
+
try:
|
| 221 |
+
data = request.json
|
| 222 |
+
document_url = data.get('document_url')
|
| 223 |
+
|
| 224 |
+
if not document_url:
|
| 225 |
+
return jsonify({'error': 'Missing document_url'}), 400
|
| 226 |
+
|
| 227 |
+
result = run_extraction_script('extract_nbi_ocr.py', document_url)
|
| 228 |
+
return jsonify(result)
|
| 229 |
+
|
| 230 |
+
except Exception as e:
|
| 231 |
+
return jsonify({'success': False, 'error': str(e)}), 500
|
| 232 |
+
|
| 233 |
+
@app.route('/api/extract-police-clearance', methods=['POST'])
|
| 234 |
+
def api_extract_police_clearance():
|
| 235 |
+
"""Extract Police Clearance details"""
|
| 236 |
+
try:
|
| 237 |
+
data = request.json
|
| 238 |
+
document_url = data.get('document_url')
|
| 239 |
+
|
| 240 |
+
if not document_url:
|
| 241 |
+
return jsonify({'error': 'Missing document_url'}), 400
|
| 242 |
+
|
| 243 |
+
result = run_extraction_script('extract_police_ocr.py', document_url)
|
| 244 |
+
return jsonify(result)
|
| 245 |
+
|
| 246 |
+
except Exception as e:
|
| 247 |
+
return jsonify({'success': False, 'error': str(e)}), 500
|
| 248 |
+
|
| 249 |
+
@app.route('/api/extract-tesda', methods=['POST'])
|
| 250 |
+
def api_extract_tesda():
|
| 251 |
+
"""Extract TESDA Certificate details"""
|
| 252 |
+
try:
|
| 253 |
+
data = request.json
|
| 254 |
+
document_url = data.get('document_url')
|
| 255 |
+
|
| 256 |
+
if not document_url:
|
| 257 |
+
return jsonify({'error': 'Missing document_url'}), 400
|
| 258 |
+
|
| 259 |
+
result = run_extraction_script('extract_tesda_ocr.py', document_url)
|
| 260 |
+
return jsonify(result)
|
| 261 |
+
|
| 262 |
+
except Exception as e:
|
| 263 |
+
return jsonify({'success': False, 'error': str(e)}), 500
|
| 264 |
+
|
| 265 |
+
# ============================================================================
|
| 266 |
+
# DOCUMENT ANALYSIS ENDPOINT
|
| 267 |
+
# ============================================================================
|
| 268 |
+
|
| 269 |
+
@app.route('/api/analyze-document', methods=['POST'])
|
| 270 |
+
def api_analyze_document():
|
| 271 |
+
"""Analyze and identify document type"""
|
| 272 |
+
try:
|
| 273 |
+
data = request.json
|
| 274 |
+
image_url = data.get('image_url')
|
| 275 |
+
|
| 276 |
+
if not image_url:
|
| 277 |
+
return jsonify({'error': 'Missing image_url'}), 400
|
| 278 |
+
|
| 279 |
+
result = run_extraction_script('analyze_document.py', image_url)
|
| 280 |
+
return jsonify(result)
|
| 281 |
+
|
| 282 |
+
except Exception as e:
|
| 283 |
+
return jsonify({'success': False, 'error': str(e)}), 500
|
| 284 |
+
|
| 285 |
+
# ============================================================================
|
| 286 |
+
# UTILITY ENDPOINTS
|
| 287 |
+
# ============================================================================
|
| 288 |
+
|
| 289 |
+
@app.route('/health', methods=['GET'])
|
| 290 |
+
def health_check():
|
| 291 |
+
"""Health check endpoint"""
|
| 292 |
+
return jsonify({
|
| 293 |
+
'status': 'healthy',
|
| 294 |
+
'service': 'handyhome-ocr-api',
|
| 295 |
+
'version': '1.0.0'
|
| 296 |
+
})
|
| 297 |
+
|
| 298 |
+
@app.route('/', methods=['GET'])
|
| 299 |
+
def index():
|
| 300 |
+
"""API documentation endpoint"""
|
| 301 |
+
return jsonify({
|
| 302 |
+
'service': 'HandyHome OCR Extraction API',
|
| 303 |
+
'version': '1.0.0',
|
| 304 |
+
'description': 'Philippine ID and Document OCR Extraction using PaddleOCR',
|
| 305 |
+
'endpoints': {
|
| 306 |
+
'Philippine IDs': {
|
| 307 |
+
'POST /api/extract-national-id': {
|
| 308 |
+
'description': 'Extract Philippine National ID details',
|
| 309 |
+
'fields': ['id_number', 'full_name', 'birth_date']
|
| 310 |
+
},
|
| 311 |
+
'POST /api/extract-drivers-license': {
|
| 312 |
+
'description': 'Extract Driver\'s License details',
|
| 313 |
+
'fields': ['license_number', 'full_name', 'birth_date', 'address']
|
| 314 |
+
},
|
| 315 |
+
'POST /api/extract-prc': {
|
| 316 |
+
'description': 'Extract PRC ID details',
|
| 317 |
+
'fields': ['prc_number', 'full_name', 'profession', 'valid_until']
|
| 318 |
+
},
|
| 319 |
+
'POST /api/extract-umid': {
|
| 320 |
+
'description': 'Extract UMID details',
|
| 321 |
+
'fields': ['crn', 'full_name', 'birth_date']
|
| 322 |
+
},
|
| 323 |
+
'POST /api/extract-sss': {
|
| 324 |
+
'description': 'Extract SSS ID details',
|
| 325 |
+
'fields': ['sss_number', 'full_name', 'birth_date']
|
| 326 |
+
},
|
| 327 |
+
'POST /api/extract-passport': {
|
| 328 |
+
'description': 'Extract Philippine Passport details',
|
| 329 |
+
'fields': ['passport_number', 'surname', 'given_names', 'birth_date']
|
| 330 |
+
},
|
| 331 |
+
'POST /api/extract-postal': {
|
| 332 |
+
'description': 'Extract Postal ID details',
|
| 333 |
+
'fields': ['prn', 'full_name', 'address', 'birth_date']
|
| 334 |
+
},
|
| 335 |
+
'POST /api/extract-phic': {
|
| 336 |
+
'description': 'Extract PhilHealth ID details',
|
| 337 |
+
'fields': ['id_number', 'full_name', 'birth_date', 'sex', 'address']
|
| 338 |
+
}
|
| 339 |
+
},
|
| 340 |
+
'Clearances & Certificates': {
|
| 341 |
+
'POST /api/extract-nbi': {
|
| 342 |
+
'description': 'Extract NBI Clearance details',
|
| 343 |
+
'fields': ['id_number', 'full_name', 'birth_date']
|
| 344 |
+
},
|
| 345 |
+
'POST /api/extract-police-clearance': {
|
| 346 |
+
'description': 'Extract Police Clearance details',
|
| 347 |
+
'fields': ['id_number', 'full_name', 'address', 'birth_date', 'status']
|
| 348 |
+
},
|
| 349 |
+
'POST /api/extract-tesda': {
|
| 350 |
+
'description': 'Extract TESDA Certificate details',
|
| 351 |
+
'fields': ['registry_number', 'full_name', 'qualification', 'date_issued']
|
| 352 |
+
}
|
| 353 |
+
},
|
| 354 |
+
'Document Analysis': {
|
| 355 |
+
'POST /api/analyze-document': {
|
| 356 |
+
'description': 'Analyze and identify document type from image',
|
| 357 |
+
'fields': ['document_type', 'confidence']
|
| 358 |
+
}
|
| 359 |
+
},
|
| 360 |
+
'Utility': {
|
| 361 |
+
'GET /health': 'Health check endpoint',
|
| 362 |
+
'GET /': 'This API documentation'
|
| 363 |
+
}
|
| 364 |
+
},
|
| 365 |
+
'request_format': {
|
| 366 |
+
'body': {
|
| 367 |
+
'document_url': 'string (required) - URL of the document image to process'
|
| 368 |
+
},
|
| 369 |
+
'example': {
|
| 370 |
+
'document_url': 'https://example.com/national_id.jpg'
|
| 371 |
+
}
|
| 372 |
+
},
|
| 373 |
+
'response_format': {
|
| 374 |
+
'success': 'boolean - Whether extraction was successful',
|
| 375 |
+
'extracted_fields': 'object - Extracted data fields',
|
| 376 |
+
'error': 'string - Error message if failed'
|
| 377 |
+
}
|
| 378 |
+
})
|
| 379 |
+
|
| 380 |
+
@app.route('/api/routes', methods=['GET'])
|
| 381 |
+
def list_routes():
|
| 382 |
+
"""List all available API routes"""
|
| 383 |
+
routes = []
|
| 384 |
+
for rule in app.url_map.iter_rules():
|
| 385 |
+
if rule.endpoint != 'static':
|
| 386 |
+
methods = sorted(rule.methods - {'HEAD', 'OPTIONS'})
|
| 387 |
+
routes.append({
|
| 388 |
+
'endpoint': rule.endpoint,
|
| 389 |
+
'methods': methods,
|
| 390 |
+
'path': rule.rule
|
| 391 |
+
})
|
| 392 |
+
|
| 393 |
+
routes.sort(key=lambda x: x['path'])
|
| 394 |
+
return jsonify({
|
| 395 |
+
'total_routes': len(routes),
|
| 396 |
+
'routes': routes
|
| 397 |
+
})
|
| 398 |
+
|
| 399 |
+
# Launch Flask app
|
| 400 |
+
if __name__ == '__main__':
|
| 401 |
+
port = int(os.environ.get('PORT', 7860))
|
| 402 |
+
app.run(host='0.0.0.0', port=port, debug=False)
|
| 403 |
+
|
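
The endpoints above all share one request contract (POST a JSON body with document_url; analyze-document takes image_url instead) and relay the extraction script's JSON verbatim. A minimal client sketch, illustrative rather than part of the uploaded files, assuming the Space runs locally on the default port 7860 and using a placeholder image URL:

    import requests

    # Any of the ID/clearance endpoints accepts the same body shape.
    resp = requests.post(
        'http://localhost:7860/api/extract-national-id',
        json={'document_url': 'https://example.com/national_id.jpg'},
        timeout=310,  # assumption: slightly above the server's 5-minute script cap
    )
    print(resp.json())  # e.g. {'success': True, 'ocr_results': {...}} or an error object
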
extract_drivers_license.py
ADDED
@@ -0,0 +1,353 @@
import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def dprint(msg, obj=None):
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    cache_files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in cache_files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        bg = Image.new('RGB', img.size, (255, 255, 255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def format_date(s):
    if not s: return None
    raw = s
    s = s.strip().replace(' ', '').replace('\\', '/').replace('.', '/')
    if re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', s):
        return s.replace('/', '-')
    m = re.match(r'([A-Za-z]+)(\d{1,2}),?(\d{4})', s)
    if m:
        mo, d, y = m.groups()
        try:
            mnum = datetime.strptime(mo[:3], '%b').month
            return f"{y}-{int(mnum):02d}-{int(d):02d}"
        except Exception:
            pass
    for fmt in ("%B%d,%Y", "%b%d,%Y", "%B %d, %Y", "%b %d, %Y"):
        try:
            return datetime.strptime(raw, fmt).strftime("%Y-%m-%d")
        except Exception:
            pass
    return raw

def cap_words(name):
    return None if not name else ' '.join(w.capitalize() for w in name.split())

def normalize_full_name(s):
    if not s:
        return None
    raw = ' '.join(s.split())

    # Format: "LAST, FIRST [SECOND ...] [MIDDLE...]"
    if ',' in raw:
        parts = [p.strip() for p in raw.split(',')]
        last = parts[0] if parts else ''
        given_block = ','.join(parts[1:]).strip() if len(parts) > 1 else ''
        tokens = [t for t in given_block.split(' ') if t]
        # keep up to two given names, drop the rest (assumed middle)
        given_kept = tokens[:2] if tokens else []
        return cap_words(' '.join(given_kept + [last]).strip())

    # Fallback: "FIRST [SECOND ...] LAST"
    tokens = [t for t in raw.split(' ') if t]
    if len(tokens) >= 2:
        last = tokens[-1]
        given_kept = tokens[:-1][:2]  # keep up to two given names, excluding the surname so it is not duplicated
        return cap_words(' '.join(given_kept + [last]).strip())
    return cap_words(raw)

def take_within(lines, start_idx, max_ahead=5):
    out = []
    for j in range(1, max_ahead + 1):
        if start_idx + j < len(lines):
            t = str(lines[start_idx + j]).strip()
            if t:
                out.append(t)
    return out

def find_match(ahead, predicate):
    for t in ahead:
        if predicate(t):
            return t
    return None

def is_date_text(t):
    t1 = t.replace(' ', '').replace('\\', '/').replace('.', '/')
    return bool(re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t1)) or bool(re.match(r'^[A-Za-z]+\s*\d{1,2},?\s*\d{4}$', t))

def is_weight(t):
    return bool(re.match(r'^\d{2,3}(\.\d+)?$', t))

def is_height(t):
    return bool(re.match(r'^\d(\.\d{1,2})$', t)) or bool(re.match(r'^\d\.\d+$', t))

def is_sex(t):
    return t.upper() in ('M', 'F', 'MALE', 'FEMALE')

def join_name_lines(line1, line2):
    # e.g., "FURAGGANAN,REIN" and "ANDRE DUCUSIN" -> "FURAGGANAN,REIN ANDRE DUCUSIN"
    if not line1: return None
    s = line1
    if line2: s = f"{line1} {line2}"
    return s

def extract_drivers_license_info(lines):
    dprint("Lines to extract", lines)

    license_number = None
    full_name = None
    birth_date = None
    nationality = None
    sex = None
    weight = None
    height = None
    address = None
    expiration_date = None
    agency_code = None
    blood_type = None
    eyes_color = None
    dl_codes = None
    conditions = None

    i = 0
    L = [str(x or '').strip() for x in lines]
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        if not license_number and re.match(r'^[A-Z]\d{2}-\d{2}-\d{6}$', line):
            license_number = line
            dprint("Found license_number", license_number)

        if ('last name' in low and 'first name' in low) or 'last name.first name.middle' in low:
            name_l1 = L[i+1] if i+1 < len(L) else ''
            name_l2 = L[i+2] if i+2 < len(L) else ''
            # Skip a stray "Name" line
            if name_l1.lower() == 'name':
                name_l1, name_l2 = name_l2, (L[i+3] if i+3 < len(L) else '')
            combined = join_name_lines(name_l1, name_l2)
            full_name = normalize_full_name(combined)
            dprint("Found full_name", full_name)
            i += 3
            continue

        if re.search(r'nation.?l.?ity', low) or low == 'phl':  # compare against the lowercased line so 'PHL' matches
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, lambda t: t.upper() in ('PHL', 'FILIPINO', 'PHILIPPINES'))
            if cand:
                nationality = cand
                dprint("Found nationality", nationality)

        if 'sex' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, is_sex)
            if cand:
                sex = 'M' if cand.upper().startswith('M') else 'F'
                dprint("Found sex", sex)

        if 'date of birth' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, is_date_text)
            if cand:
                birth_date = format_date(cand)
                dprint("Found birth_date", birth_date)

        if birth_date is None and is_date_text(line):
            birth_date = format_date(line)
            dprint("Found standalone birth_date", birth_date)

        if 'weight' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, is_weight)
            if cand:
                weight = cand
                dprint("Found weight", weight)

        if 'height' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, is_height)
            if cand:
                height = cand
                dprint("Found height", height)

        if 'address' in low:
            parts = []
            for k in range(1, 4):
                if i+k < len(L):
                    t = L[i+k]
                    if t and not any(lbl in t.lower() for lbl in ['license', 'expiration', 'agency code', 'blood', 'eyes', 'dl codes', 'conditions']):
                        parts.append(t)
            if parts:
                address = ', '.join(parts)
                dprint("Found address", address)

        if 'expiration date' in low:
            ahead = take_within(L, i, 6)
            cand = find_match(ahead, is_date_text)
            if cand:
                expiration_date = format_date(cand)
                dprint("Found expiration_date", expiration_date)

        if 'agency code' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, lambda t: re.match(r'^[A-Z]\d{2}$', t) is not None)
            if cand:
                agency_code = cand
                dprint("Found agency_code", agency_code)

        if 'blood' in low and 'type' in low:
            ahead = take_within(L, i, 3)
            cand = find_match(ahead, lambda t: re.match(r'^(AB|A|B|O)[+-]?$', t.upper()) is not None)  # AB included; single-letter class would miss it
            if cand:
                blood_type = cand.upper()
                dprint("Found blood_type", blood_type)

        if 'eyes' in low and 'color' in low:
            ahead = take_within(L, i, 5)
            cand = find_match(ahead, lambda t: t.isalpha())
            if cand:
                eyes_color = cap_words(cand)
                dprint("Found eyes_color", eyes_color)

        if 'dl codes' in low:
            ahead = take_within(L, i, 3)
            cand = find_match(ahead, lambda t: True)  # take first non-empty
            if cand:
                dl_codes = cand
                dprint("Found dl_codes", dl_codes)

        if 'conditions' in low:
            ahead = take_within(L, i, 3)
            cand = find_match(ahead, lambda t: True)
            if cand:
                conditions = cand
                dprint("Found conditions", conditions)

        i += 1

    result = {
        'id_type': 'drivers_license',
        'license_number': license_number,
        'id_number': license_number,  # for frontend compatibility
        'full_name': full_name,
        'birth_date': birth_date,
        'nationality': nationality,
        'sex': sex,
        'weight': weight,
        'height': height,
        'address': address,
        'blood_type': blood_type,
        'eyes_color': eyes_color,
        'dl_codes': dl_codes,
        'conditions': conditions,
        'agency_code': agency_code,
        'expiration_date': expiration_date
    }
    dprint("Final result", result)
    return result

def extract_ocr_lines(image_path):
    os.makedirs("output", exist_ok=True)
    dprint("Initializing PaddleOCR")

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en',
            show_log=False
        )
        dprint("OCR initialized")
        dprint("Running OCR predict", image_path)
        results = ocr.ocr(image_path, cls=False)
        dprint("OCR predict done, results_count", len(results))

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        dprint("Error processing OCR results", str(e))

    dprint("All direct texts", all_text)
    return extract_drivers_license_info(all_text) if all_text else {'id_type': 'drivers_license', 'license_number': None, 'id_number': None, 'full_name': None, 'birth_date': None}

if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
dprint("Processing image URL", image_url)
try:
    image_path = download_image(image_url)
    dprint("Image downloaded to", image_path)
    ocr_results = extract_ocr_lines(image_path)
    dprint("OCR results ready")

    # Restore stdout and print only the JSON response
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": True, "ocr_results": ocr_results}))
    sys.stdout.flush()

except Exception as e:
    dprint("Exception", str(e))
    # Restore stdout for error JSON
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    # Clean up
    try:
        clean_cache()
    except:
        pass
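
As a quick illustration of the name and date normalization above, a hypothetical REPL session with these helpers in scope (expected values assume the corrected two-given-names fallback):

    print(normalize_full_name('FURAGGANAN,REIN ANDRE DUCUSIN'))  # 'Rein Andre Furagganan'
    print(normalize_full_name('JUAN CRUZ'))                      # 'Juan Cruz'
    print(format_date('JULY10,2003'))                            # '2003-07-10'
    print(format_date('2003/07/10'))                             # '2003-07-10'
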
extract_national_id.py
ADDED
@@ -0,0 +1,364 @@
#!/usr/bin/env python3
"""
Philippine National ID Information Extraction Script

Purpose:
    Extracts structured information from Philippine National ID card images using OCR.
    This script is designed to work as a microservice for document verification systems.

Why this script exists:
    - Manual verification of National IDs is time-consuming and error-prone
    - Need standardized data extraction from bilingual (English/Filipino) ID cards
    - Required for automated document verification workflows
    - Supports API integration for web applications

Key Features:
    - Extracts the 16-digit National ID number, written as 19 characters with hyphens (format: XXXX-XXXX-XXXX-XXXX)
    - Handles bilingual labels (English/Filipino)
    - Processes name composition from separate fields
    - Formats dates consistently (ISO format: YYYY-MM-DD)
    - Preserves EXIF metadata when possible

Dependencies:
    - PaddleOCR: High-accuracy OCR engine (https://github.com/PaddlePaddle/PaddleOCR)
    - Pillow (PIL): Image processing (https://pillow.readthedocs.io/)
    - requests: HTTP library for image downloads (https://docs.python-requests.org/)

Usage:
    python extract_national_id.py "https://example.com/national_id.jpg"

Output:
    JSON with extracted information: id_number, full_name, birth_date
"""

import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def clean_cache():
    """
    Delete all cached images and output files to prevent disk space issues.

    Why this is needed:
    - OCR processing creates temporary files that can accumulate
    - Prevents disk space exhaustion in production environments
    - Ensures clean state for each new document processing
    """
    # List of temporary files created by PaddleOCR and our processing
    cache_files = [
        'temp_image.jpg',                   # Downloaded image
        'temp_image_ocr_res_img.jpg',       # OCR result visualization
        'temp_image_preprocessed_img.jpg',  # Preprocessed image
        'temp_image_res.json'               # OCR result JSON
    ]

    # Delete individual cache files
    for file in cache_files:
        if os.path.exists(file):
            os.remove(file)
            print(f"DEBUG: Removed cached file: {file}", file=sys.stderr)

    # Delete output directory and its contents
    # This removes any OCR output files from previous runs
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        print("DEBUG: Removed output directory", file=sys.stderr)

def download_image(url, output_path='temp_image.jpg'):
    """
    Download image from URL and process it for OCR.

    Args:
        url (str): URL of the National ID image to process
        output_path (str): Local path to save the downloaded image

    Returns:
        str: Path to the downloaded and processed image

    Why this approach:
    - Downloads images from URLs (common in web applications)
    - Converts RGBA to RGB (JPEG doesn't support alpha channels)
    - Preserves EXIF data when possible (important for document authenticity)
    - Uses high quality (95%) to maintain OCR accuracy
    """
    # Clean cache before downloading new image to prevent conflicts
    clean_cache()

    # Download the image with error handling
    response = requests.get(url)
    response.raise_for_status()  # Raise exception for HTTP errors
    image_data = response.content

    # First, try to extract EXIF data from the original image
    # EXIF data is important for document authenticity verification
    original_exif = None
    try:
        original_image = Image.open(BytesIO(image_data))

        # Check if image has EXIF data (some formats don't support it)
        if hasattr(original_image, '_getexif') and original_image._getexif():
            original_exif = original_image._getexif()
    except Exception:
        # If EXIF extraction fails, continue without it
        pass

    # Now process the image for OCR
    image = Image.open(BytesIO(image_data))

    # Convert to RGB if necessary (JPEG doesn't support alpha channel)
    # This is crucial for OCR accuracy as PaddleOCR expects RGB images
    if image.mode == 'RGBA':
        # Create a white background for transparent images
        background = Image.new('RGB', image.size, (255, 255, 255))
        # Paste the image onto the background using alpha channel as mask
        background.paste(image, mask=image.split()[-1])  # Use alpha channel as mask
        image = background
    elif image.mode != 'RGB':
        # Convert other formats (like L for grayscale) to RGB
        image = image.convert('RGB')

    # Save as JPG and preserve EXIF data if it existed
    # High quality (95%) maintains OCR accuracy while keeping file size reasonable
    if original_exif:
        image.save(output_path, 'JPEG', quality=95, exif=original_exif)
    else:
        image.save(output_path, 'JPEG', quality=95)

    print(f"DEBUG: Downloaded and saved image to: {output_path}", file=sys.stderr)
    return output_path

# Formatting Helpers
def format_birth_date(date_str):
    """
    Format birth date string to ISO format (YYYY-MM-DD).

    Args:
        date_str (str): Raw date string from OCR

    Returns:
        str: Formatted date in YYYY-MM-DD format, or original string if parsing fails

    Why this complexity:
    - OCR often produces inconsistent date formats
    - Philippine IDs use various date formats
    - Need standardized output for database storage
    - Handles common OCR errors like missing spaces
    """
    if not date_str:
        return None

    # Try to match formats like 'JULY10,2003' or 'JULY 10, 2003'
    # Remove spaces first to handle OCR inconsistencies
    date_str = date_str.replace(' ', '')
    match = re.match(r'([A-Za-z]+)(\d{1,2}),?(\d{4})', date_str)
    if match:
        month_str, day, year = match.groups()
        try:
            # Convert month name to number using first 3 characters
            month = datetime.strptime(month_str[:3], '%b').month
            return f"{year}-{int(month):02d}-{int(day):02d}"
        except Exception:
            pass

    # Try to parse with datetime for other possible formats
    # This handles various common date formats found in Philippine IDs
    for fmt in ("%B%d,%Y", "%b%d,%Y", "%B %d, %Y", "%b %d, %Y", "%Y-%m-%d"):
        try:
            dt = datetime.strptime(date_str, fmt)
            return dt.strftime("%Y-%m-%d")
        except Exception:
            continue

    # Fallback to original if parsing fails
    return date_str

def capitalize_name(name):
    """
    Properly capitalize name string.

    Args:
        name (str): Raw name string from OCR

    Returns:
        str: Properly capitalized name

    Why this is needed:
    - OCR often produces inconsistent capitalization
    - Need standardized name format for database storage
    - Handles multiple spaces and OCR artifacts
    """
    if not name:
        return name

    # Capitalize each word, handling possible multiple spaces or OCR errors
    return ' '.join([w.capitalize() for w in name.split()])

# OCR Function
def extract_id_info(lines):
    """
    Extract structured information from OCR text lines.

    Args:
        lines (list): List of text lines from OCR processing

    Returns:
        dict: Extracted information with keys: id_number, full_name, birth_date

    Why this approach:
    - Philippine National IDs have specific format requirements
    - ID number is always 19 characters with 3 hyphens
    - Uses bilingual labels (English/Filipino) for field identification
    - Handles cases where labels and values are on separate lines
    """
    print("DEBUG: Processing lines:", lines, file=sys.stderr)

    # Initialize variables for extracted information
    id_number = None
    last_name = None
    given_names = None
    birth_date = None

    # Process each line to find relevant information
    for i in range(len(lines)):
        line = lines[i]
        print(f"DEBUG: Processing line {i}: '{line}'", file=sys.stderr)

        # Check for National ID number format: XXXX-XXXX-XXXX-XXXX
        # This is the standard format for Philippine National IDs
        if isinstance(line, str) and len(line) == 19 and line.count('-') == 3 and all(part.isdigit() for part in line.split('-')):
            id_number = line
            print(f"DEBUG: Found ID number: {id_number}", file=sys.stderr)

        # Look for bilingual "Last Name" label
        # Philippine IDs often have both English and Filipino labels
        if line == "Apelyido/Last Name" and i+1 < len(lines):
            last_name = lines[i+1]
            print(f"DEBUG: Found last name: {last_name}", file=sys.stderr)

        # Look for bilingual "Given Names" label
        if line == "Mga Pangalan/Given Names" and i+1 < len(lines):
            given_names = lines[i+1]
            print(f"DEBUG: Found given names: {given_names}", file=sys.stderr)

        # Look for bilingual "Date of Birth" label
        if line == "Petsa ng Kapanganakan/Date of Birth" and i+1 < len(lines):
            birth_date = lines[i+1]
            print(f"DEBUG: Found birth date: {birth_date}", file=sys.stderr)

    # Compose full name from separate fields
    # Philippine names typically follow: Given Names + Last Name
    name_parts = [given_names, last_name]
    full_name = ' '.join([part for part in name_parts if part])
    full_name = capitalize_name(full_name)

    # Format birth date to ISO standard
    formatted_birth_date = format_birth_date(birth_date) if birth_date else None

    # Return structured result
    result = {
        'id_number': id_number,
        'full_name': full_name,
        'birth_date': formatted_birth_date
    }

    print("DEBUG: Final result:", json.dumps(result, indent=2), file=sys.stderr)
    return result

def extract_ocr_lines(image_path):
    """
    Perform OCR on image and extract text lines.

    Args:
        image_path (str): Path to the image file

    Returns:
        dict: Extracted information from the image

    Why these OCR settings:
    - Disabled expensive features for better performance in production
    - English language setting (Philippine IDs primarily use English)
    - Suppressed logs to keep output clean for API responses
    - Offscreen rendering for server environments
    """
    # Ensure output directory exists for OCR results
    os.makedirs("output", exist_ok=True)

    # Initialize PaddleOCR with optimized settings
    # These settings balance accuracy with performance
    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,  # Disable for better performance
            use_doc_unwarping=False,             # Disable for better performance
            use_textline_orientation=False,      # Disable for better performance
            lang='en',                           # English language
            show_log=False                       # Suppress logs for clean output
        )
        results = ocr.ocr(image_path, cls=False)

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG: Error processing OCR results: {str(e)}", file=sys.stderr)

    print(f"DEBUG: Extracted text lines: {all_text}", file=sys.stderr)
    return extract_id_info(all_text) if all_text else {'id_number': None, 'full_name': None, 'birth_date': None}

# Main execution
if __name__ == "__main__":
    # Validate command line arguments
    if len(sys.argv) < 2:
        sys.stdout = original_stdout  # restore stdout so the error JSON reaches the caller (matches the other scripts)
        print(json.dumps({"error": "No image URL provided"}))
        sys.exit(1)

    image_url = sys.argv[1]
    print(f"DEBUG: Processing image URL: {image_url}", file=sys.stderr)

    try:
        # Download and process the image
        image_path = download_image(image_url)

        # Perform OCR and extract information
        ocr_results = extract_ocr_lines(image_path)

        # Restore stdout and print only the JSON response
        sys.stdout = original_stdout
        sys.stdout.write(json.dumps({
            "success": True,
            "ocr_results": ocr_results
        }))
        sys.stdout.flush()

    except Exception as e:
        # Restore stdout for error JSON
        sys.stdout = original_stdout
        sys.stdout.write(json.dumps({"error": str(e)}))
        sys.stdout.flush()
        sys.exit(1)
    finally:
        # Clean up
        try:
            clean_cache()
        except:
            pass
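
extract_id_info can be exercised without running OCR at all; feeding it a hypothetical list of lines in the layout the script expects (the 19-character ID string plus bilingual label/value pairs) shows the matching, name composition, and date formatting end to end:

    sample_lines = [
        '1234-5678-9012-3456',                    # placeholder PCN: 16 digits in 4 hyphenated groups
        'Apelyido/Last Name',
        'DELA CRUZ',
        'Mga Pangalan/Given Names',
        'JUAN',
        'Petsa ng Kapanganakan/Date of Birth',
        'JULY 10, 2003',
    ]
    print(extract_id_info(sample_lines))
    # {'id_number': '1234-5678-9012-3456', 'full_name': 'Juan Dela Cruz', 'birth_date': '2003-07-10'}
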
extract_nbi_ocr.py
ADDED
@@ -0,0 +1,200 @@
import sys, json, os, glob, requests
import re
import time
import shutil
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def download_image(url, output_path='temp_image.jpg'):
    # Remove any existing temp file
    if os.path.exists(output_path):
        os.remove(output_path)

    # Add cache-busting parameters
    timestamp = int(time.time())
    if '?' in url:
        url += f'&t={timestamp}'
    else:
        url += f'?t={timestamp}'

    # Add headers to prevent caching
    headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'
    }

    response = requests.get(url, headers=headers)
    response.raise_for_status()
    image_data = response.content

    # Save the image and verify it's the right one
    with open(output_path, 'wb') as f:
        f.write(image_data)

    return output_path

# OCR Function to extract NBI ID NO
def extract_nbi_id(lines):
    nbi_id = None

    for i, line in enumerate(lines):
        if isinstance(line, str):
            # Look for "NBI ID NO:" pattern
            if "NBI ID NO:" in line.upper() or "NBIIDNO" in line.upper():
                # Extract the ID after the colon
                parts = line.split(':')
                if len(parts) > 1:
                    nbi_id = parts[1].strip()
                    break
            # Also check if the next line contains the ID (in case it's on a separate line)
            elif i < len(lines) - 1 and ("NBI ID NO:" in line.upper() or "NBI ID NO" in line.upper()):
                next_line = lines[i + 1]
                if isinstance(next_line, str) and len(next_line.strip()) > 5:
                    nbi_id = next_line.strip()
                    break

    # If not found with "NBI ID NO:" pattern, look for the specific format
    if not nbi_id:
        for line in lines:
            if isinstance(line, str):
                # Look for pattern like HGUR87H38D-U47204A873 (alphanumeric with one hyphen)
                pattern = r'[A-Z0-9]{10,12}-[A-Z0-9]{10,12}'
                match = re.search(pattern, line)
                if match:
                    nbi_id = match.group()
                    break

    return {
        'id_number': nbi_id,
        'full_name': None,
        'birth_date': None,
        'success': nbi_id is not None
    }

def extract_ocr_lines_simple(image_path):
    # Try with different PaddleOCR settings
    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=True,  # Enable orientation detection
            use_doc_unwarping=True,             # Enable document unwarping
            use_textline_orientation=True,      # Enable text line orientation
            lang='en',                          # Set language to English
            show_log=False
        )
        results = ocr.ocr(image_path, cls=True)

    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception:
        pass

    return extract_nbi_id(all_text) if all_text else {'id_number': None, 'full_name': None, 'birth_date': None, 'success': False}

def extract_ocr_lines(image_path):
    # Check if file exists and has content
    if not os.path.exists(image_path):
        return {'id_number': None, 'success': False}

    # Ensure output directory exists
    os.makedirs("output", exist_ok=True)

    # Clear previous output files
    for old_file in glob.glob("output/*"):
        os.remove(old_file)

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            show_log=False
        )
        results = ocr.ocr(image_path, cls=False)

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG: Error processing OCR results: {str(e)}", file=sys.stderr)

    print(f"DEBUG: Extracted text lines: {all_text}", file=sys.stderr)
    return extract_nbi_id(all_text) if all_text else {'id_number': None, 'full_name': None, 'birth_date': None, 'success': False}

# Main
if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"success": False, "error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
print(f"DEBUG: Processing NBI image URL: {image_url}", file=sys.stderr)

try:
    image_path = download_image(image_url, 'temp_image.jpg')
    print(f"DEBUG: Image downloaded to: {image_path}", file=sys.stderr)

    # Try the original OCR method first
    ocr_results = extract_ocr_lines(image_path)
    print(f"DEBUG: OCR results from extract_ocr_lines: {ocr_results}", file=sys.stderr)

    # If original method fails, try simple method
    if not ocr_results['success']:
        print("DEBUG: Original method failed, trying simple method", file=sys.stderr)
        ocr_results = extract_ocr_lines_simple(image_path)
        print(f"DEBUG: OCR results from extract_ocr_lines_simple: {ocr_results}", file=sys.stderr)

    # Clean up the temporary file
    if os.path.exists(image_path):
        os.remove(image_path)

    # Create the response object
    response = {
        "success": ocr_results['success'],
        "ocr_results": ocr_results
    }

    # Restore stdout and print only the JSON response
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps(response))
    sys.stdout.flush()

except Exception as e:
    # Restore stdout for error JSON
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": False, "error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    # Clean up
    try:
        if os.path.exists('temp_image.jpg'):
            os.remove('temp_image.jpg')
    except:
        pass
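
The regex fallback carries the cases where the "NBI ID NO:" label itself is misread by OCR; a quick illustrative check of what the pattern does and does not match, on hypothetical strings:

    import re

    pattern = r'[A-Z0-9]{10,12}-[A-Z0-9]{10,12}'
    print(re.search(pattern, 'NBI ID NO: HGUR87H38D-U47204A873').group())  # 'HGUR87H38D-U47204A873'
    print(re.search(pattern, 'CLEARANCE NO. 123-456'))                     # None: segments shorter than 10 chars
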
extract_passport.py
ADDED
@@ -0,0 +1,459 @@
#!/usr/bin/env python3
"""
Philippine Passport Information Extraction Script

Purpose:
    Extracts structured information from Philippine passport images using OCR.
    Handles complex passport layouts with multiple information fields.

Why this script exists:
- Passports have complex layouts with multiple information fields
- Need to extract international-standard passport information
- Handles bilingual labels (English/Filipino)
- Required for passport verification workflows

Key Features:
- Extracts passport number (format: X0000000A)
- Handles complex name structures (surname, given names, middle name)
- Processes multiple date fields (birth, issue, expiration)
- Extracts nationality and place of birth
- Handles OCR digit correction

Dependencies:
- PaddleOCR: High-accuracy OCR engine (https://github.com/PaddlePaddle/PaddleOCR)
- Pillow (PIL): Image processing (https://pillow.readthedocs.io/)
- requests: HTTP library (https://docs.python-requests.org/)

Usage:
    python extract_passport.py "https://example.com/passport.jpg"

Output:
    JSON with extracted information: passport_number, full_name, birth_date, valid_until, etc.
"""

import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Route any non-JSON prints to stderr by default
_ORIG_STDOUT = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR
def dprint(msg, obj=None):
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        # Flatten transparency onto a white background before saving as JPEG
        bg = Image.new('RGB', img.size, (255, 255, 255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def cap_words(s):
    return None if not s else ' '.join(w.capitalize() for w in s.split())

def normalize_digits(s):
    """
    Fix common OCR digit confusions.

    Args:
        s (str): Text string that may contain OCR errors

    Returns:
        str: Text with corrected digits

    Why this is needed:
    - OCR often misreads similar-looking characters
    - Common errors: O→0, o→0, I/l→1, S→5, B→8
    - Critical for accurate ID number extraction
    """
    return (
        str(s)
        .replace('O','0').replace('o','0')
        .replace('I','1').replace('l','1')
        .replace('S','5')
        .replace('B','8')
    )

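# Worked example (my own illustration, not part of the original script):
#   normalize_digits("OI23-S8") -> "0123-58"
# The substitutions are unconditional, so purely alphabetic fields such as
# names should never be passed through this helper.
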
def normalize_full_name(surname, given_names, middle_name=None):
    if not surname and not given_names:
        return None

    surname = surname.strip() if surname else ""
    given_names = given_names.strip() if given_names else ""
    middle_name = middle_name.strip() if middle_name else ""

    # Combine given names (first + second if present)
    given_parts = [p for p in given_names.split() if p]
    if len(given_parts) >= 2:
        # Keep the first two given names; the middle name is ignored
        name_parts = [given_parts[0], given_parts[1], surname]
    elif len(given_parts) == 1:
        name_parts = [given_parts[0], surname]
    else:
        name_parts = [surname]

    return cap_words(' '.join(name_parts))

def format_date(s):
    if not s:
        return None
    raw = str(s).strip()

    # Fix OCR digit issues first
    raw = normalize_digits(raw)

    # Handle "16MAR1980" format (no spaces)
    try:
        if re.match(r'\d{2}[A-Z]{3}\d{4}', raw):
            return datetime.strptime(raw, "%d%b%Y").strftime("%Y-%m-%d")
    except Exception:
        pass

    # Handle "16 MAR 1980" / "27 JUN 2016" format (with spaces)
    try:
        return datetime.strptime(raw, "%d %b %Y").strftime("%Y-%m-%d")
    except Exception:
        pass

    # Handle other date formats
    t = raw.replace(' ', '').replace('\\','/').replace('.','/')
    if re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t):
        return t.replace('/', '-')
    if re.match(r'^\d{2}/\d{2}/\d{4}$', raw):
        m, d, y = raw.split('/')
        return f"{y}-{int(m):02d}-{int(d):02d}"

    return raw

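# Worked examples (my own illustration, not part of the original script):
#   format_date("16MAR1980")   -> "1980-03-16"
#   format_date("27 JUN 2016") -> "2016-06-27"
#   format_date("06/15/1990")  -> "1990-06-15"
# Unrecognized strings fall through unchanged, so callers can still log them.
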
def extract_passport_number(text):
    # Fix OCR digits first
    text = normalize_digits(text)
    # Look for passport number pattern like "P0000000A"
    passport_pattern = r'\b([A-Z]\d{7}[A-Z0-9])\b'
    match = re.search(passport_pattern, text)
    if match:
        return match.group(1)
    return None

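# Worked example (my own illustration, not part of the original script):
#   extract_passport_number("Passport No. PI234567A") -> "P1234567A"
# normalize_digits() runs first, so an OCR read like "PI234567A" still matches
# once the I has been mapped to 1.
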
def take_within(lines, i, k=5):
    # Collect up to k non-empty lines that follow position i (the "lookahead")
    out = []
    for j in range(1, k+1):
        if i+j < len(lines):
            t = str(lines[i+j]).strip()
            if t:
                out.append(t)
    return out

def extract_passport_info(lines):
    """
    Extract passport information from OCR text lines.

    Args:
        lines (list): List of text lines from OCR processing

    Returns:
        dict: Extracted passport information

    Why this approach:
    - Passports follow ICAO standards with specific formats
    - Complex name structure requires separate handling
    - Multiple date fields need individual processing
    - Uses lookahead pattern matching for field extraction
    """
    dprint("Lines to extract", lines)

    # Initialize variables for extracted information
    full_name = None
    surname = None
    given_names = None
    middle_name = None
    passport_number = None
    birth_date = None
    sex = None
    nationality = None
    place_of_birth = None
    date_of_issue = None
    valid_until = None
    issuing_authority = None

    L = [str(x or '').strip() for x in lines]
    i = 0
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        # Extract passport number using pattern matching
        if not passport_number:
            passport_num = extract_passport_number(line)
            if passport_num:
                passport_number = passport_num
                dprint("Found passport number", passport_number)

        # Extract Surname using lookahead pattern
        if 'surname' in low or 'apelyido' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and not re.search(r'[0-9]', t):
                    surname = t
                    dprint("Found surname", surname)
                    break
        # Also look for "DELA CRUZ" directly
        if not surname and 'dela' in low and 'cruz' in low:
            surname = line
            dprint("Found surname (direct)", surname)

        # Extract Given Names
        if ('given' in low and 'name' in low) or 'pangalan' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and not re.search(r'[0-9]', t):
                    given_names = t
                    dprint("Found given names", given_names)
                    break
        # Also look for "MARIA" directly
        if not given_names and line == 'MARIA':
            given_names = line
            dprint("Found given names (direct)", given_names)

        # Extract Middle Name
        if 'middle' in low or 'panggitnang' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and not re.search(r'[0-9]', t):
                    middle_name = t
                    dprint("Found middle name", middle_name)
                    break
        # Also look for "SANTOS" directly
        if not middle_name and line == 'SANTOS':
            middle_name = line
            dprint("Found middle name (direct)", middle_name)

        # Extract Date of Birth
        if 'birth' in low or 'kapanganakan' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'\d{1,2}[A-Z]{3}\d{4}', t) or re.search(r'\d{1,2}\s+[A-Z]{3}\s+\d{4}', t):
                    birth_date = format_date(t)
                    dprint("Found birth date", birth_date)
                    break
        # Also look for "16MAR1980" directly
        if not birth_date and re.search(r'\d{1,2}[A-Z]{3}\d{4}', line):
            birth_date = format_date(line)
            dprint("Found birth date (direct)", birth_date)

        # Extract Sex
        if 'sex' in low or 'kasarian' in low:
            ahead = take_within(L, i, 2)
            for t in ahead:
                if t.upper() in ['M', 'F', 'MALE', 'FEMALE']:
                    sex = 'M' if t.upper().startswith('M') else 'F'
                    dprint("Found sex", sex)
                    break
        # Also look for "F" directly
        if not sex and line == 'F':
            sex = 'F'
            dprint("Found sex (direct)", sex)

        # Extract Nationality
        if 'nationality' in low or 'nasyonalidad' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and not re.search(r'[0-9]', t):
                    nationality = t
                    dprint("Found nationality", nationality)
                    break
        # Also look for "FILIPINO" directly
        if not nationality and line == 'FILIPINO':
            nationality = line
            dprint("Found nationality (direct)", nationality)

        # Extract Place of Birth
        if ('place' in low and 'birth' in low) or 'lugar' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and not re.search(r'[0-9]', t):
                    place_of_birth = t
                    dprint("Found place of birth", place_of_birth)
                    break
        # Also look for "MANILA" directly
        if not place_of_birth and line == 'MANILA':
            place_of_birth = line
            dprint("Found place of birth (direct)", place_of_birth)

        # Extract Date of Issue
        if 'issue' in low or 'pagkakaloob' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'\d{1,2}[A-Z]{3}\d{4}', t) or re.search(r'\d{1,2}\s+[A-Z]{3}\s+\d{4}', t):
                    date_of_issue = format_date(t)
                    dprint("Found date of issue", date_of_issue)
                    break
        # Also look for "27JUN2016" directly
        if not date_of_issue and re.search(r'\d{1,2}[A-Z]{3}\d{4}', line):
            date_of_issue = format_date(line)
            dprint("Found date of issue (direct)", date_of_issue)

        # Extract Valid Until Date
        if 'valid' in low or 'pagkawalang' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'\d{1,2}\s+[A-Z]{3}\s+\d{4}', t):
                    valid_until = format_date(t)
                    dprint("Found valid until", valid_until)
                    break
        # Also look for "26 JUN 2021" directly
        if not valid_until and re.search(r'\d{1,2}\s+[A-Z]{3}\s+\d{4}', line):
            valid_until = format_date(line)
            dprint("Found valid until (direct)", valid_until)

        # Extract Issuing Authority
        if 'authority' in low or 'maykapangyarihang' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if re.search(r'[A-Z]{2,}', t) and 'DFA' in t:
                    issuing_authority = t
                    dprint("Found issuing authority", issuing_authority)
                    break
        # Also look for "DFAMANILA" directly
        if not issuing_authority and 'DFA' in line:
            issuing_authority = line
            dprint("Found issuing authority (direct)", issuing_authority)

        i += 1

    # Compose full name from separate fields
    if not full_name:
        full_name = normalize_full_name(surname, given_names, middle_name)
        dprint("Composed full name", {"surname": surname, "given": given_names, "middle": middle_name, "full": full_name})

    # Return structured result
    result = {
        "id_type": "passport",
        "passport_number": passport_number,
        "id_number": passport_number,
        "full_name": full_name,
        "surname": surname,
        "given_names": given_names,
        "middle_name": middle_name,
        "birth_date": birth_date,
        "sex": sex,
        "nationality": nationality,
        "place_of_birth": place_of_birth,
        "date_of_issue": date_of_issue,
        "valid_until": valid_until,
        "issuing_authority": issuing_authority
    }
    dprint("Final result", result)
    return result

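# Illustrative sketch of the lookahead flow, with made-up OCR lines
# (my own example, not from a real passport):
#   lines = ["Apelyido / Surname", "DELA CRUZ",
#            "Pangalan / Given Names", "MARIA CLARA",
#            "Date of Birth", "16 MAR 1980"]
#   extract_passport_info(lines)["full_name"]  -> "Maria Clara Dela Cruz"
#   extract_passport_info(lines)["birth_date"] -> "1980-03-16"
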
def extract_ocr_lines(image_path):
    os.makedirs("output", exist_ok=True)
    dprint("Initializing PaddleOCR")

    # Ensure any internal downloader/progress writes go to stderr, not stdout
    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en',
            show_log=False
        )
        dprint("OCR initialized")
        dprint("Running OCR", image_path)
        results = ocr.ocr(image_path, cls=False)
        try:
            count = len(results[0]) if results and isinstance(results[0], list) else len(results)
        except Exception:
            count = 0
        dprint("OCR done, results_count", count)

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        dprint("Error processing OCR results", str(e))

    dprint("All direct texts", all_text)
    return extract_passport_info(all_text) if all_text else {
        "id_type": "passport",
        "passport_number": None,
        "id_number": None,
        "full_name": None,
        "birth_date": None
    }

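# Note (added commentary, an assumption on my part rather than something the
# original states): the show_log/cls arguments and the list-of-[box, (text,
# score)] result shape handled above match the PaddleOCR 2.x API;
# extract_phic.py below also copes with the newer OCRResult/rec_texts shape.
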
if len(sys.argv) < 2:
    # Restore stdout so the error JSON actually reaches the caller
    sys.stdout = _ORIG_STDOUT
    print(json.dumps({"error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
dprint("Processing image URL", image_url)
try:
    image_path = download_image(image_url)
    dprint("Image downloaded to", image_path)
    ocr_results = extract_ocr_lines(image_path)
    dprint("OCR results ready")
    # Ensure only the final JSON goes to stdout
    sys.stdout = _ORIG_STDOUT
    print(json.dumps({"success": True, "ocr_results": ocr_results}))
except Exception as e:
    import traceback
    error_msg = str(e)
    traceback_msg = traceback.format_exc()
    dprint("Exception", error_msg)
    dprint("Traceback", traceback_msg)
    # Restore stdout here too; otherwise the error JSON would go to stderr
    sys.stdout = _ORIG_STDOUT
    print(json.dumps({
        "error": error_msg,
        "traceback": traceback_msg,
        "success": False
    }))
    sys.exit(1)

extract_phic.py
ADDED
@@ -0,0 +1,405 @@
import sys, json, os, glob, requests
import re
import time
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

|
| 20 |
+
# Remove any existing temp file
|
| 21 |
+
if os.path.exists(output_path):
|
| 22 |
+
os.remove(output_path)
|
| 23 |
+
|
| 24 |
+
# Add cache-busting parameters
|
| 25 |
+
timestamp = int(time.time())
|
| 26 |
+
if '?' in url:
|
| 27 |
+
url += f'&t={timestamp}'
|
| 28 |
+
else:
|
| 29 |
+
url += f'?t={timestamp}'
|
| 30 |
+
|
| 31 |
+
# Add headers to prevent caching
|
| 32 |
+
headers = {
|
| 33 |
+
'Cache-Control': 'no-cache, no-store, must-revalidate',
|
| 34 |
+
'Pragma': 'no-cache',
|
| 35 |
+
'Expires': '0'
|
| 36 |
+
}
|
| 37 |
+
|
| 38 |
+
response = requests.get(url, headers=headers)
|
| 39 |
+
response.raise_for_status()
|
| 40 |
+
image_data = response.content
|
| 41 |
+
|
| 42 |
+
# Save the image
|
| 43 |
+
with open(output_path, 'wb') as f:
|
| 44 |
+
f.write(image_data)
|
| 45 |
+
|
| 46 |
+
return output_path
|
| 47 |
+
|
| 48 |
+
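# The cache-buster simply appends a timestamp query parameter, e.g.
# (example.com is a placeholder of mine):
#   https://example.com/id.jpg     -> https://example.com/id.jpg?t=1700000000
#   https://example.com/id.jpg?x=1 -> https://example.com/id.jpg?x=1&t=1700000000
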
def format_date(date_str):
    """Format date from various formats to YYYY-MM-DD"""
    if not date_str:
        return None

    date_str = date_str.strip()

    # Fix common OCR month-name errors first. Each fix only applies when the
    # token is not followed by another letter: a plain str.replace('Marc',
    # 'March') would also corrupt a correct "March" into "Marchh".
    month_fixes = {
        'VULY': 'JULY', 'VUL': 'JUL', 'Juy': 'July',
        'Januay': 'January', 'Februay': 'February',
        'Marc': 'March', 'Apil': 'April',
        'Augu': 'August', 'Septemb': 'September',
        'Octob': 'October', 'Novem': 'November', 'Decemb': 'December',
    }
    for wrong, right in month_fixes.items():
        date_str = re.sub(rf'{wrong}(?![A-Za-z])', right, date_str)

    # Fix period instead of comma: "10.2003" -> "10, 2003"
    date_str = re.sub(r'(\d{1,2})\.(\d{4})', r'\1, \2', date_str)

    # Handle format like "JULY 10, 2003" or "July 10, 2003"
    match = re.match(r'([A-Za-z]+)\s+(\d{1,2}),?\s+(\d{4})', date_str)
    if match:
        month_str, day, year = match.groups()
        try:
            # Parse month name: try the 3-letter abbreviation first,
            # then fall back to the full month name
            try:
                month = datetime.strptime(month_str[:3], '%b').month
            except ValueError:
                month = datetime.strptime(month_str, '%B').month
            return f"{year}-{month:02d}-{int(day):02d}"
        except Exception as e:
            print(f"DEBUG: Date parsing error: {e}", file=sys.stderr)

    # Try other common formats
    for fmt in ["%B %d, %Y", "%b %d, %Y", "%Y-%m-%d", "%m/%d/%Y", "%d/%m/%Y"]:
        try:
            dt = datetime.strptime(date_str, fmt)
            return dt.strftime("%Y-%m-%d")
        except Exception:
            continue

    return date_str

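# Worked examples (my own illustration, not part of the original script):
#   format_date("VULY 10.2003")  -> "2003-07-10"
#   format_date("July 10, 2003") -> "2003-07-10"
#   format_date("10/07/2003")    -> "2003-10-07"  # %m/%d/%Y is tried before %d/%m/%Y
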
def format_name(name):
    """Format name: capitalize properly and fix spacing"""
    if not name:
        return None

    # Ensure comma spacing: "FERNANDEZ,CARL" -> "FERNANDEZ, CARL"
    name = re.sub(r',([A-Z])', r', \1', name)
    name = re.sub(r',\s*([A-Z])', r', \1', name)

    # Fix missing spaces in name parts:
    # add a space before a capital that follows a lowercase letter
    name = re.sub(r'([a-z])([A-Z])', r'\1 \2', name)
    # Add a space between a run of capitals and a capital+lowercase pair
    name = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1 \2', name)

    # Remove extra spaces and normalize
    name = ' '.join(name.split())

    # Capitalize each word properly
    name = ' '.join([word.capitalize() for word in name.split()])

    return name.strip()

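# Worked example (my own illustration, not part of the original script):
#   format_name("FERNANDEZ,CARL MATTHEW") -> "Fernandez, Carl Matthew"
# Caveat: the spacing regexes need mixed case to find word seams, so a fused
# all-caps run such as "MATTHEWICOY" passes through unsplit.
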
def format_address(address_lines):
    """Format address from multiple lines and fix OCR errors"""
    if not address_lines:
        return None

    # Join address lines and clean up
    address = ' '.join([line.strip() for line in address_lines if line.strip()])

    # Fix OCR errors in address (hard-coded corrections seen in samples)
    address = address.replace('DYLABONFACIO', 'BONIFACIO')
    address = address.replace('AVENJE', 'AVENUE')
    address = address.replace('SANIODOMINGO', 'SANTO DOMINGO')
    address = address.replace('SANIO', 'SANTO')
    address = address.replace('AZAL', 'RIZAL')
    # Note: the 'AZAL-' rule below is unreachable because the previous line
    # already rewrote every 'AZAL'; kept for reference
    address = address.replace('AZAL-', 'RIZAL ')

    # Fix missing spaces: "071A" -> "071 A"
    address = re.sub(r'(\d+)([A-Z])', r'\1 \2', address)

    # Add a space between a capital and a following capitalized word
    # (requires mixed case, e.g. "CAINTARizal" -> "CAINTA Rizal")
    address = re.sub(r'([A-Z])([A-Z][a-z]+)', r'\1 \2', address)

    # Remove extra spaces
    address = ' '.join(address.split())

    return address.strip()

def extract_phic_details(lines):
    details = {
        'id_number': None,
        'full_name': None,
        'birth_date': None,
        'sex': None,
        'address': None,
        'membership_category': None,
        'success': False
    }

    # Clean lines - convert to strings and strip
    cleaned_lines = [str(line).strip() for line in lines if str(line).strip()]

    for i, line in enumerate(cleaned_lines):
        line_upper = line.upper().strip()
        line_stripped = line.strip()

        # Extract ID Number - Format: XX-XXXXXXXXX-X (e.g., "03-026765383-2")
        if not details['id_number']:
            # Look for pattern: digits-digits-digits with hyphens
            id_match = re.search(r'(\d{2}-\d{9}-\d)', line_stripped)
            if id_match:
                details['id_number'] = id_match.group(1)
            # Also check for a 12-digit run where OCR dropped the hyphens
            elif re.match(r'^\d{12}$', line_stripped):
                # Format: 030267653832 -> 03-026765383-2
                details['id_number'] = f"{line_stripped[:2]}-{line_stripped[2:11]}-{line_stripped[11]}"

        # Extract Full Name - usually appears as "LASTNAME, FIRST MIDDLE"
        if not details['full_name']:
            # Look for name pattern with comma (LASTNAME, FIRST MIDDLE)
            if ',' in line_stripped and re.match(r'^[A-Z\s,]+$', line_stripped):
                # Make sure it's not a label
                if not any(label in line_upper for label in ['REPUBLIC', 'PHILIPPINE', 'HEALTH', 'INSURANCE', 'CORPORATION', 'PHILHEALTH', 'DATE', 'BIRTH', 'ADDRESS', 'MEMBERSHIP', 'CATEGORY']):
                    details['full_name'] = line_stripped
            # Also check for name parts on separate lines
            elif re.match(r'^[A-Z\s,]+$', line_stripped) and len(line_stripped.split()) >= 2:
                # Make sure it's not a label
                if not any(label in line_upper for label in ['REPUBLIC', 'PHILIPPINE', 'HEALTH', 'INSURANCE', 'CORPORATION', 'PHILHEALTH', 'DATE', 'BIRTH', 'ADDRESS', 'MEMBERSHIP', 'CATEGORY', 'INFORMAL', 'ECONOMY']):
                    # A comma-bearing previous line, if any, is the likelier name
                    if i > 0:
                        prev_line = cleaned_lines[i-1].strip()
                        if ',' in prev_line and re.match(r'^[A-Z\s,]+$', prev_line):
                            details['full_name'] = prev_line

        # Extract Date of Birth and Sex - e.g. "JULY 10, 2003 - MALE",
        # often OCR'd as "VULY10.2003-MALE"
        if not details['birth_date'] or not details['sex']:
            # Month name (possibly garbled) + day (period or comma) + year + sex
            date_sex_match = re.search(r'([A-Za-z]+)\s*(\d{1,2})[.,]\s*(\d{4})\s*[-]?\s*(MALE|FEMALE)', line_upper)
            if date_sex_match:
                month_str = date_sex_match.group(1)
                day = date_sex_match.group(2)
                year = date_sex_match.group(3)
                sex_str = date_sex_match.group(4)

                # Fix OCR errors in month (crude prefix replacements; a correct
                # month like "JANUARY" picks up a stray suffix here, but
                # format_date() only reads the first three letters, so the
                # final output is unaffected)
                month_str = month_str.replace('VULY', 'JULY').replace('VUL', 'JUL')
                month_str = month_str.replace('JANUA', 'JANUARY').replace('FEBRUA', 'FEBRUARY')
                month_str = month_str.replace('MARC', 'MARCH').replace('APIL', 'APRIL')
                month_str = month_str.replace('AUGU', 'AUGUST').replace('SEPTEM', 'SEPTEMBER')
                month_str = month_str.replace('OCTO', 'OCTOBER').replace('NOVEM', 'NOVEMBER')
                month_str = month_str.replace('DECEM', 'DECEMBER')

                if not details['birth_date']:
                    details['birth_date'] = f"{month_str} {day}, {year}"
                if not details['sex']:
                    details['sex'] = sex_str.capitalize()
            # Also check for a date pattern on its own (period instead of comma)
            elif not details['birth_date']:
                date_match = re.search(r'([A-Za-z]+)\s*(\d{1,2})[.,]\s*(\d{4})', line_upper)
                if date_match:
                    month_str = date_match.group(1)
                    day = date_match.group(2)
                    year = date_match.group(3)
                    # Fix OCR errors
                    month_str = month_str.replace('VULY', 'JULY').replace('VUL', 'JUL')
                    # Make sure it's not part of address or other field
                    if "AVENUE" not in line_upper and "STREET" not in line_upper and "RIZAL" not in line_upper:
                        details['birth_date'] = f"{month_str} {day}, {year}"

        # Check for sex alone if date was found separately
        if not details['sex']:
            if "MALE" in line_upper and "FEMALE" not in line_upper:
                details['sex'] = "Male"
            elif "FEMALE" in line_upper:
                details['sex'] = "Female"

        # Extract Address - usually a long line with street, city, province, zip
        if not details['address']:
            # Look for address indicators (but exclude ID numbers and names)
            if (any(indicator in line_upper for indicator in ['AVENUE', 'AVENJE', 'STREET', 'ROAD', 'BLVD', 'BOULEVARD', 'CAINTA', 'RIZAL', 'AZAL', 'SANTO', 'SANIO', 'DOMINGO', 'BONIFACIO', 'DYLABONFACIO']) or
                    (re.match(r'^\d+', line_stripped) and len(line_stripped) > 5)):
                # Make sure it's not an ID number or name
                is_id = bool(re.search(r'\d{2}-\d{9}-\d', line_stripped))
                is_name = bool(',' in line_stripped and re.match(r'^[A-Z\s,]+$', line_stripped) and len(line_stripped.split()) <= 5)

                if not is_id and not is_name:
                    address_lines = []
                    # Collect address lines forward
                    for j in range(0, min(3, len(cleaned_lines) - i)):
                        addr_line = cleaned_lines[i+j].strip()
                        addr_upper = addr_line.upper()
                        # Stop if we hit membership category, ID number, name, or other labels
                        if (any(label in addr_upper for label in ['MEMBERSHIP', 'CATEGORY', 'INFORMAL', 'ECONOMY', 'DATE', 'BIRTH', 'MALE', 'FEMALE']) or
                                re.search(r'\d{2}-\d{9}-\d', addr_line) or
                                (',' in addr_line and re.match(r'^[A-Z\s,]+$', addr_line) and len(addr_line.split()) <= 5)):
                            break
                        # Add if it looks like address content
                        if addr_line and len(addr_line) > 2:
                            address_lines.append(addr_line)
                    if address_lines:
                        details['address'] = format_address(address_lines)

        # Extract Membership Category - usually "INFORMAL ECONOMY" or similar
        if not details['membership_category']:
            if "MEMBERSHIP" in line_upper and "CATEGORY" in line_upper:
                # Category is likely on the next line
                if i + 1 < len(cleaned_lines):
                    next_line = cleaned_lines[i+1].strip()
                    if next_line:
                        details['membership_category'] = next_line
            elif "INFORMAL ECONOMY" in line_upper or ("INFORMAL" in line_upper and "ECONOMY" in line_upper):
                details['membership_category'] = "INFORMAL ECONOMY"
            elif any(cat in line_upper for cat in ['INFORMAL', 'EMPLOYED', 'SELF-EMPLOYED', 'VOLUNTARY', 'SPONSORED', 'DEPENDENT']):
                # Check if it's a membership category (not part of address or name)
                if not any(label in line_upper for label in ['AVENUE', 'STREET', 'RIZAL', 'CAINTA', 'FERNANDEZ']):
                    details['membership_category'] = line_stripped

    # Format extracted fields
    if details['full_name']:
        details['full_name'] = format_name(details['full_name'])
    if details['birth_date']:
        details['birth_date'] = format_date(details['birth_date'])

    if details['id_number'] or details['full_name']:
        details['success'] = True

    return details

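# Illustrative sketch with made-up OCR lines (my own example, reusing the
# sample values already quoted in the comments above):
#   extract_phic_details(["03-026765383-2",
#                         "FERNANDEZ, CARL MATTHEW",
#                         "JULY 10, 2003 - MALE"])
#   -> id_number "03-026765383-2", full_name "Fernandez, Carl Matthew",
#      birth_date "2003-07-10", sex "Male", success True
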
def extract_ocr_lines(image_path):
    # Check if file exists
    if not os.path.exists(image_path):
        return {'success': False, 'error': 'File not found'}

    file_size = os.path.getsize(image_path)
    print(f"DEBUG: Image file size: {file_size} bytes", file=sys.stderr)

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        # Try simple configuration first
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en'
        )
        try:
            results = ocr.ocr(image_path)
        except Exception as e:
            print(f"DEBUG: ocr() failed: {e}, trying predict()", file=sys.stderr)
            if hasattr(ocr, 'predict'):
                results = ocr.predict(image_path)
            else:
                results = None

    # Debug: Print raw results structure
    print(f"DEBUG: Raw OCR results type: {type(results)}", file=sys.stderr)

    all_text = []
    try:
        # Handle both old format (list) and new format (OCRResult object)
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            item_type_name = type(first_item).__name__
            is_ocr_result = 'OCRResult' in item_type_name or 'ocr_result' in str(type(first_item)).lower()

            if is_ocr_result:
                print(f"DEBUG: Detected OCRResult object format (type: {item_type_name})", file=sys.stderr)
                # Access OCRResult as dictionary
                try:
                    if hasattr(first_item, 'keys'):
                        ocr_dict = dict(first_item)
                        # Look for rec_texts key
                        if 'rec_texts' in ocr_dict:
                            rec_texts = ocr_dict['rec_texts']
                            if isinstance(rec_texts, list):
                                all_text = [str(t) for t in rec_texts if t]
                                print(f"DEBUG: Extracted {len(all_text)} text lines from rec_texts", file=sys.stderr)
                except Exception as e:
                    print(f"DEBUG: Error accessing OCRResult: {e}", file=sys.stderr)
            else:
                # Old format - list of lists
                lines = results[0] if results and isinstance(results[0], list) else results
                for item in lines:
                    if isinstance(item, (list, tuple)) and len(item) >= 2:
                        meta = item[1]
                        if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                            all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG: Error processing OCR results: {str(e)}", file=sys.stderr)
        import traceback
        print(f"DEBUG: Traceback: {traceback.format_exc()}", file=sys.stderr)

    print(f"DEBUG: Extracted text lines: {all_text}", file=sys.stderr)

    return extract_phic_details(all_text) if all_text else {
        'id_number': None,
        'full_name': None,
        'birth_date': None,
        'sex': None,
        'address': None,
        'membership_category': None,
        'success': False
    }

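# The two result shapes handled above, sketched from memory (my assumption
# about the PaddleOCR versions involved, not taken from the original):
#   older API: [[ [box, ("SOME TEXT", 0.98)], ... ]]
#   newer API: [OCRResult], where dict(result)["rec_texts"] == ["SOME TEXT", ...]
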
# Main Execution
if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"success": False, "error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
print(f"DEBUG: Processing PhilHealth ID image URL: {image_url}", file=sys.stderr)

try:
    image_path = download_image(image_url, 'temp_phic_image.jpg')
    print(f"DEBUG: Image downloaded to: {image_path}", file=sys.stderr)

    ocr_results = extract_ocr_lines(image_path)
    print(f"DEBUG: OCR results: {ocr_results}", file=sys.stderr)

    # Clean up
    if os.path.exists(image_path):
        os.remove(image_path)

    response = {
        "success": ocr_results['success'],
        "data": ocr_results
    }

    sys.stdout = original_stdout
    sys.stdout.write(json.dumps(response))
    sys.stdout.flush()

except Exception as e:
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": False, "error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    try:
        if os.path.exists('temp_phic_image.jpg'):
            os.remove('temp_phic_image.jpg')
    except OSError:
        pass

extract_police_ocr.py
ADDED
@@ -0,0 +1,812 @@
import sys, json, os, glob, requests
import re
import time
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def download_image(url, output_path='temp_police_image.jpg'):
    # Remove any existing temp file
    if os.path.exists(output_path):
        os.remove(output_path)

    # Add cache-busting parameters
    timestamp = int(time.time())
    if '?' in url:
        url += f'&t={timestamp}'
    else:
        url += f'?t={timestamp}'

    # Add headers to prevent caching
    headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'
    }

    response = requests.get(url, headers=headers)
    response.raise_for_status()
    image_data = response.content

    # Save the image
    with open(output_path, 'wb') as f:
        f.write(image_data)

    return output_path

def format_name(name):
    """Format name: add proper spacing and commas (generic for all police clearances)

    Handles common OCR issues like missing spaces between name parts and missing
    comma spacing. Works with any name format, not specific to one document.
    """
    if not name:
        return None

    # Remove extra spaces and normalize
    name = ' '.join(name.split())

    # First, ensure comma spacing: "JAVA,ALBERT" -> "JAVA, ALBERT"
    name = re.sub(r',([A-Z])', r', \1', name)
    name = re.sub(r',\s*([A-Z])', r', \1', name)

    # Split by comma if present
    if ',' in name:
        parts = name.split(',')
        formatted_parts = []
        for part in parts:
            part = part.strip()
            # Split a run of capitals from a following capital+lowercase pair,
            # e.g. "ALBERTJoy" -> "ALBERT Joy" (requires mixed case to match)
            part = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1 \2', part)
            # Handle remaining fused pairs: "JoyBautista" -> "Joy Bautista"
            part = re.sub(r'([A-Z][a-z]+)([A-Z][a-z]+)', r'\1 \2', part)
            formatted_parts.append(part)
        name = ', '.join(formatted_parts)
    else:
        # No comma: try to add spaces between name parts.
        # Add a space before a capital that follows a lowercase letter
        name = re.sub(r'([a-z])([A-Z])', r'\1 \2', name)
        # Split runs of capitals from a following capital+lowercase pair;
        # words like "Bautista" stay together because they have no such seam
        name = re.sub(r'([A-Z]{2,})([A-Z][a-z])', r'\1 \2', name)
        name = re.sub(r'([A-Z]+)([A-Z][a-z]+)', r'\1 \2', name)

    # Clean up multiple spaces
    name = ' '.join(name.split())

    return name.strip()

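# Worked example (my own illustration, not part of the original script):
#   format_name("Java,AlbertJoy Bautista") -> "Java, Albert Joy Bautista"
# As in the PhilHealth extractor, fully upper-case fusions such as
# "JAVAALBERTJOY" cannot be split because every regex needs a lowercase seam.
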
def format_address(address):
    """Format address: add proper spacing (generic for all police clearances)"""
    if not address:
        return None

    # Remove extra spaces
    address = ' '.join(address.split())

    # Handle #BLK/#BLOCK pattern: ensure a space between letters and digits
    # "#BLK11" -> "#BLK 11", "#BLOCK5" -> "#BLOCK 5"
    address = re.sub(r'#([A-Z]+)(\d+)', r'#\1 \2', address)

    # Add a space between a capital and a following capitalized word
    # (requires mixed case, e.g. "CAMPOTinio" -> "CAMPO Tinio")
    address = re.sub(r'([A-Z])([A-Z][a-z]+)', r'\1 \2', address)

    # Ensure comma spacing: "CITY,NUEVA" -> "CITY, NUEVA"
    address = re.sub(r',([A-Z])', r', \1', address)
    address = re.sub(r',\s*([A-Z])', r', \1', address)

    # Clean up multiple spaces
    address = ' '.join(address.split())

    return address.strip()

def format_birth_place(place):
    """Format birth place: add proper spacing (generic for all police clearances)"""
    if not place:
        return None

    # Remove extra spaces
    place = ' '.join(place.split())

    # Ensure comma spacing: "DILASAG,AURORA" -> "DILASAG, AURORA"
    place = re.sub(r',([A-Z])', r', \1', place)
    place = re.sub(r',\s*([A-Z])', r', \1', place)

    # Add a space before a capitalized province/region name where a lowercase
    # seam exists (all-caps fusions pass through unchanged)
    place = re.sub(r'([A-Z])([A-Z][a-z]+)', r'\1 \2', place)

    # Clean up multiple spaces
    place = ' '.join(place.split())

    return place.strip()

def format_birth_date(date):
    """Format birth date: fix common OCR errors (generic for all police clearances)"""
    if not date:
        return None

    # Fix common OCR errors for month names. Each fix only applies when the
    # token is not followed by another letter: a plain str.replace('Jun',
    # 'June') would corrupt a correct "June" into "Junee", and 'Marc' ->
    # 'March' would turn "March" into "Marchh".
    month_fixes = {
        'Juy': 'July', 'Januay': 'January', 'Februay': 'February',
        'Marc': 'March', 'Apil': 'April', 'Jun': 'June', 'Augu': 'August',
        'Septemb': 'September', 'Octob': 'October', 'Novemb': 'November',
        'Decemb': 'December',
    }
    for wrong, right in month_fixes.items():
        date = re.sub(rf'{wrong}(?![A-Za-z])', right, date)

    # Fix a day misread as a year: "July 1905, 1991" -> "July 05, 1991"
    # Pattern: "Month 19XX, YYYY" where 19XX is likely the day misread
    match = re.search(r'(\w+)\s+19(\d{2}),\s*(\d{4})', date)
    if match:
        day = match.group(2)
        # If the trailing two digits are 00-31, treat them as the day
        if 0 <= int(day) <= 31:
            date = re.sub(r'(\w+)\s+19(\d{2}),\s*(\d{4})', rf'\1 {day}, \3', date)

    # Ensure proper date format: "July 05, 1991"
    date = re.sub(r'(\w+)\s+(\d{1,2})\s*,\s*(\d{4})', r'\1 \2, \3', date)

    # Clean up multiple spaces
    date = ' '.join(date.split())

    return date.strip()

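# Worked example (my own illustration, not part of the original script):
#   format_birth_date("Juy 1905, 1991") -> "July 05, 1991"
# The 19XX day fix-up exists because the labeled-field branch below widens
# bare two-digit numbers to 19XX, which also catches the day of the month.
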
def extract_police_details(lines):
    details = {
        'id_number': None,
        'full_name': None,
        'address': None,
        'birth_date': None,
        'birth_place': None,
        'citizenship': None,
        'gender': None,
        'status': None,
        'success': False
    }

    for i, line in enumerate(lines):
        if not isinstance(line, str):
            continue

        line_upper = line.upper().strip()
        line_stripped = line.strip()

        # Extract Name - handle cases where NAME and value are on separate lines
        if "NAME" in line_upper and not details['full_name']:
            if ":" in line:
                parts = line.split(':', 1)
                if len(parts) > 1:
                    name_part = parts[1].strip()
                    if name_part and len(name_part) > 2:
                        details['full_name'] = name_part
            elif i + 1 < len(lines):
                # Check next few lines for the name value
                for j in range(1, min(3, len(lines) - i)):
                    next_line = lines[i+j].strip()
                    if next_line.startswith(':') and len(next_line) > 1:
                        name_part = next_line[1:].strip()
                        if name_part and len(name_part) > 2 and "ADDRESS" not in name_part.upper():
                            details['full_name'] = name_part
                            break
                    elif not next_line.startswith(('ADDRESS', 'BIRTH', 'CITIZEN', 'GENDER', 'ID')) and len(next_line) > 2:
                        if ":" not in next_line or (":" in next_line and next_line.index(':') < 3):
                            name_part = next_line.replace(':', '').strip()
                            if name_part and len(name_part) > 2:
                                details['full_name'] = name_part
                                break

        # Also check for name patterns that start with a colon
        # (OCR sometimes splits the NAME label from its value)
        if not details['full_name'] and line_stripped.startswith(':') and len(line_stripped) > 5:
            name_candidate = line_stripped[1:].strip()
            # Check if it looks like a name (has commas, multiple words, etc.)
            if ',' in name_candidate or (len(name_candidate.split()) >= 2 and name_candidate.isupper()):
                # Make sure the previous line wasn't ADDRESS or another label
                if i > 0:
                    prev_line = lines[i-1].strip().upper()
                    if "ADDRESS" not in prev_line and "BIRTH" not in prev_line:
                        details['full_name'] = name_candidate

        # Extract Address
        if "ADDRESS" in line_upper and not details['address']:
            if ":" in line:
                parts = line.split(':')
                if len(parts) > 1:
                    addr_part = parts[1].strip()
                    if addr_part:
                        details['address'] = addr_part
            elif i + 1 < len(lines):
                # Check next few lines for the address value
                addr_parts = []
                for j in range(1, min(4, len(lines) - i)):
                    next_line = lines[i+j].strip()
                    if next_line.startswith(':') and len(next_line) > 1:
                        addr_parts.append(next_line[1:].strip())
                    elif "BIRTH" not in next_line.upper() and "CITIZEN" not in next_line.upper():
                        if ":" in next_line:
                            parts = next_line.split(':', 1)
                            if len(parts) > 1:
                                addr_parts.append(parts[1].strip())
                        elif len(next_line) > 2:
                            addr_parts.append(next_line)
                    else:
                        break
                if addr_parts:
                    details['address'] = ' '.join(addr_parts).strip()

        # Extract Birth Date - handle OCR errors and combined patterns
        if ("BIRTH DATE" in line_upper or "BIRTHDATE" in line_upper) and not details['birth_date']:
            if ":" in line:
                parts = line.split(':', 1)
                if len(parts) > 1:
                    date_part = parts[1].strip()
                    # Fix common OCR errors
                    date_part = date_part.replace('Juy', 'July')
                    # Fix year errors (1001 -> 1991, etc.)
                    date_part = re.sub(r'\b1001\b', '1991', date_part)
                    # Widen bare two-digit numbers to 19XX; this also catches
                    # the day of the month, which format_birth_date() undoes
                    date_part = re.sub(r'\b(\d{2})\b', lambda m: '19' + m.group(1) if len(m.group(1)) == 2 and int(m.group(1)) < 50 else m.group(1), date_part)
                    if date_part:
                        details['birth_date'] = date_part
            elif i + 1 < len(lines):
                next_line = lines[i+1].strip()
                if ":" in next_line:
                    parts = next_line.split(':', 1)
                    if len(parts) > 1:
                        date_part = parts[1].strip()
                        date_part = date_part.replace('Juy', 'July')
                        date_part = re.sub(r'\b1001\b', '1991', date_part)
                        if date_part:
                            details['birth_date'] = date_part

        # Also look for date patterns in lines that might have been OCR'd
        # incorrectly. line_upper is fully upper-cased, so the month
        # alternation below must be matched case-insensitively or it would
        # never fire.
        if not details['birth_date']:
            # Look for patterns like "Juy 05, 1001" or "July 03, 1991"
            date_pattern = re.search(r'(January|February|March|April|May|June|July|August|September|October|November|December|Juy|Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+\d{1,2}[,\s]+\d{4}', line_upper, re.IGNORECASE)
            if date_pattern:
                date_part = date_pattern.group()
                date_part = date_part.replace('Juy', 'July')
                date_part = re.sub(r'\b1001\b', '1991', date_part)
                details['birth_date'] = date_part

        # Extract Birth Place
        if "BIRTH PLACE" in line_upper and not details['birth_place']:
            if ":" in line:
                parts = line.split(':', 1)
                if len(parts) > 1:
                    details['birth_place'] = parts[1].strip()
            elif i + 1 < len(lines):
                next_line = lines[i+1].strip()
                if next_line.startswith(':') and len(next_line) > 1:
                    details['birth_place'] = next_line[1:].strip()
                elif ":" in next_line and "CITIZEN" not in next_line.upper():
                    parts = next_line.split(':', 1)
                    if len(parts) > 1:
                        details['birth_place'] = parts[1].strip()

        # Extract Citizenship
        if "CITIZENSHIP" in line_upper and not details['citizenship']:
            if ":" in line:
                parts = line.split(':', 1)
                if len(parts) > 1:
                    details['citizenship'] = parts[1].strip()
            elif i + 1 < len(lines):
                next_line = lines[i+1].strip()
                if next_line.startswith(':') and len(next_line) > 1:
                    details['citizenship'] = next_line[1:].strip()
                elif ":" in next_line:
                    parts = next_line.split(':', 1)
                    if len(parts) > 1:
                        details['citizenship'] = parts[1].strip()

        # Extract Gender - handle cases where GENDER and value are on separate lines
        if "GENDER" in line_upper and not details['gender']:
            if ":" in line:
                parts = line.split(':', 1)
                if len(parts) > 1:
                    details['gender'] = parts[1].strip()
            elif i + 1 < len(lines):
                next_line = lines[i+1].strip()
                if next_line.startswith(':') and len(next_line) > 1:
                    gender_part = next_line[1:].strip()
                    if gender_part in ['MALE', 'FEMALE', 'M', 'F']:
                        details['gender'] = gender_part
                elif ":" in next_line:
                    parts = next_line.split(':', 1)
                    if len(parts) > 1:
                        gender_part = parts[1].strip()
                        if gender_part in ['MALE', 'FEMALE', 'M', 'F']:
                            details['gender'] = gender_part

        # Extract ID Number (usually "ID No.:" or near the QR code)
        if "ID NO" in line_upper or "ID NO." in line_upper:
            parts = line.split(':')
            if len(parts) > 1:
                details['id_number'] = parts[1].strip()

        # Fallback ID extraction looking for specific patterns if not found by label
        if not details['id_number']:
            # Look for pattern like TRARH + digits
|
| 351 |
+
id_match = re.search(r'\b[A-Z]{4,5}\d{10,15}\b', line_upper)
|
| 352 |
+
if id_match:
|
| 353 |
+
details['id_number'] = id_match.group()
|
| 354 |
+
|
| 355 |
+
# Extract Status (e.g., "NO RECORD ON FILE")
|
| 356 |
+
if "NO RECORD ON FILE" in line_upper:
|
| 357 |
+
details['status'] = "NO RECORD ON FILE"
|
| 358 |
+
elif "HAS A RECORD" in line_upper or "WITH RECORD" in line_upper:
|
| 359 |
+
details['status'] = "HAS RECORD"
|
| 360 |
+
|
| 361 |
+
if details['full_name'] or details['id_number']:
|
| 362 |
+
details['success'] = True
|
| 363 |
+
|
| 364 |
+
# Format the extracted fields
|
| 365 |
+
if details['full_name']:
|
| 366 |
+
details['full_name'] = format_name(details['full_name'])
|
| 367 |
+
if details['address']:
|
| 368 |
+
details['address'] = format_address(details['address'])
|
| 369 |
+
if details['birth_place']:
|
| 370 |
+
details['birth_place'] = format_birth_place(details['birth_place'])
|
| 371 |
+
if details['birth_date']:
|
| 372 |
+
details['birth_date'] = format_birth_date(details['birth_date'])
|
| 373 |
+
|
| 374 |
+
return details
|
| 375 |
+
|
| 376 |
+
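
# Hypothetical usage sketch (never called; the transcript below is invented)
# showing the label/value shapes extract_police_details expects from an OCR
# pass: a bare label, a value line that starts with ':', a fused
# "LABEL : value" line, and a status line.
def _demo_extract_police_details():
    sample = [
        'NAME',
        ': DELA CRUZ, JUAN SANTOS',
        'ADDRESS : 123 SAMPLE ST, QUEZON CITY',
        'BIRTH DATE : July 03, 1991',
        'NO RECORD ON FILE',
    ]
    # Expected shape: full_name/address/birth_date filled in,
    # status 'NO RECORD ON FILE', success True.
    return extract_police_details(sample)
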
def extract_ocr_lines(image_path):
    # Check if the file exists
    if not os.path.exists(image_path):
        return {'success': False, 'error': 'File not found'}

    file_size = os.path.getsize(image_path)
    print(f"DEBUG: Image file size: {file_size} bytes", file=sys.stderr)

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        # Try the simple configuration first (matching the NBI script's primary method)
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en'
        )
        try:
            results = ocr.ocr(image_path)
        except Exception as e:
            print(f"DEBUG: ocr() failed: {e}, trying predict()", file=sys.stderr)
            if hasattr(ocr, 'predict'):
                results = ocr.predict(image_path)
            else:
                results = None

    # Debug: print the raw results structure
    print(f"DEBUG: Raw OCR results type: {type(results)}", file=sys.stderr)
    print(f"DEBUG: Raw OCR results is None: {results is None}", file=sys.stderr)
    if results is not None:
        print(f"DEBUG: Raw OCR results length: {len(results) if isinstance(results, list) else 'N/A'}", file=sys.stderr)
        if isinstance(results, list) and len(results) > 0:
            print(f"DEBUG: First level item type: {type(results[0])}", file=sys.stderr)
            print(f"DEBUG: First level item: {str(results[0])[:200] if results[0] else 'None'}", file=sys.stderr)
            if isinstance(results[0], list) and len(results[0]) > 0:
                print(f"DEBUG: Second level first item: {str(results[0][0])[:200] if results[0][0] else 'None'}", file=sys.stderr)

    # Process OCR results - handle both the old format (list) and the new format (OCRResult object)
    all_text = []
    try:
        # Check if the results contain OCRResult objects (new PaddleX format)
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            # Check whether it is an OCRResult object by type name
            item_type_name = type(first_item).__name__
            is_ocr_result = 'OCRResult' in item_type_name or 'ocr_result' in str(type(first_item)).lower()

            if is_ocr_result:
                print(f"DEBUG: Detected OCRResult object format (type: {item_type_name})", file=sys.stderr)
                # Inspect attributes
                attrs = dir(first_item)
                print(f"DEBUG: OCRResult attributes: {[a for a in attrs if not a.startswith('_')]}", file=sys.stderr)

                for ocr_result in results:
                    # Try various possible attribute names for the text
                    text_found = False

                    # First, try accessing it as a dictionary (OCRResult is dict-like)
                    try:
                        if hasattr(ocr_result, 'keys'):
                            ocr_dict = dict(ocr_result)
                            print(f"DEBUG: OCRResult as dict keys: {list(ocr_dict.keys())}", file=sys.stderr)

                            # Look for common OCR result keys ('rec_texts' is the actual key in a PaddleX OCRResult)
                            for key in ['rec_texts', 'rec_text', 'dt_polys', 'ocr_text', 'text', 'texts', 'result', 'results', 'ocr_result', 'dt_boxes']:
                                if key in ocr_dict:
                                    val = ocr_dict[key]
                                    print(f"DEBUG: Found key '{key}': {type(val)}, length: {len(val) if isinstance(val, list) else 'N/A'}", file=sys.stderr)
                                    if isinstance(val, list):
                                        # 'rec_texts' is a list of strings directly
                                        if key == 'rec_texts':
                                            for text_item in val:
                                                if isinstance(text_item, str) and text_item.strip():
                                                    all_text.append(text_item.strip())
                                                elif text_item:
                                                    all_text.append(str(text_item))
                                            if val:
                                                text_found = True
                                        else:
                                            # For other keys, try to extract text from nested structures
                                            for item in val:
                                                if isinstance(item, (list, tuple)) and len(item) >= 2:
                                                    # Format: [[coords], (text, confidence)]
                                                    text_part = item[1]
                                                    if isinstance(text_part, (list, tuple)) and len(text_part) >= 1:
                                                        all_text.append(str(text_part[0]))
                                                elif isinstance(item, str):
                                                    all_text.append(item)
                                            if val:
                                                text_found = True
                                    elif isinstance(val, str) and val:
                                        all_text.append(val)
                                        text_found = True
                                    if text_found:
                                        break
                    except Exception as e:
                        print(f"DEBUG: Error accessing OCRResult as dict: {e}", file=sys.stderr)

                    # Try the json() method
                    if not text_found:
                        try:
                            if hasattr(ocr_result, 'json'):
                                json_data = ocr_result.json()
                                print(f"DEBUG: OCRResult.json() type: {type(json_data)}", file=sys.stderr)
                                if isinstance(json_data, dict):
                                    print(f"DEBUG: OCRResult.json() keys: {list(json_data.keys())}", file=sys.stderr)
                                    # Look for text in the JSON ('rec_texts' is the actual key)
                                    for key in ['rec_texts', 'rec_text', 'dt_polys', 'ocr_text', 'text', 'texts', 'result', 'results']:
                                        if key in json_data:
                                            val = json_data[key]
                                            if isinstance(val, list):
                                                # 'rec_texts' is a list of strings directly
                                                if key == 'rec_texts':
                                                    for text_item in val:
                                                        if isinstance(text_item, str) and text_item.strip():
                                                            all_text.append(text_item.strip())
                                                        elif text_item:
                                                            all_text.append(str(text_item))
                                                    if val:
                                                        text_found = True
                                                else:
                                                    for item in val:
                                                        if isinstance(item, (list, tuple)) and len(item) >= 2:
                                                            text_part = item[1]
                                                            if isinstance(text_part, (list, tuple)) and len(text_part) >= 1:
                                                                all_text.append(str(text_part[0]))
                                                        elif isinstance(item, str):
                                                            all_text.append(item)
                                                    if val:
                                                        text_found = True
                                            elif isinstance(val, str) and val:
                                                all_text.append(val)
                                                text_found = True
                                            if text_found:
                                                break
                        except Exception as e:
                            print(f"DEBUG: Error calling json(): {e}", file=sys.stderr)

                    # Try the rec_text attribute
                    if not text_found and hasattr(ocr_result, 'rec_text'):
                        rec_text = ocr_result.rec_text
                        print(f"DEBUG: Found rec_text attribute: {type(rec_text)}", file=sys.stderr)
                        if isinstance(rec_text, list):
                            all_text.extend([str(t) for t in rec_text if t])
                            text_found = True
                        elif rec_text:
                            all_text.append(str(rec_text))
                            text_found = True

                    # Try the text attribute
                    if not text_found and hasattr(ocr_result, 'text'):
                        text = ocr_result.text
                        print(f"DEBUG: Found text attribute: {type(text)}", file=sys.stderr)
                        if isinstance(text, list):
                            all_text.extend([str(t) for t in text if t])
                            text_found = True
                        elif text:
                            all_text.append(str(text))
                            text_found = True

                    # If there is still no text, print the full structure for debugging
                    if not text_found:
                        print(f"DEBUG: Could not find text in OCRResult, trying to inspect structure", file=sys.stderr)
                        try:
                            print(f"DEBUG: OCRResult repr: {repr(ocr_result)[:500]}", file=sys.stderr)
                            # Try to get all keys/items
                            if hasattr(ocr_result, 'keys'):
                                try:
                                    all_keys = list(ocr_result.keys())
                                    print(f"DEBUG: All OCRResult keys: {all_keys}", file=sys.stderr)
                                    for key in all_keys:
                                        try:
                                            val = ocr_result[key]
                                            print(f"DEBUG: Key '{key}' type: {type(val)}, value preview: {str(val)[:100]}", file=sys.stderr)
                                        except Exception:
                                            pass
                                except Exception:
                                    pass
                        except Exception as e:
                            print(f"DEBUG: Error inspecting structure: {e}", file=sys.stderr)
            else:
                # Old format - list of lists
                lines = results[0] if results and isinstance(results[0], list) else results
                print(f"DEBUG: Processing lines (old format), count: {len(lines) if isinstance(lines, list) else 'N/A'}", file=sys.stderr)
                for item in lines:
                    if isinstance(item, (list, tuple)) and len(item) >= 2:
                        meta = item[1]
                        if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                            all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG: Error processing OCR results: {str(e)}", file=sys.stderr)
        import traceback
        print(f"DEBUG: Traceback: {traceback.format_exc()}", file=sys.stderr)
        # Try to inspect the object attributes
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            print(f"DEBUG: First item attributes: {dir(first_item)}", file=sys.stderr)
            if hasattr(first_item, '__dict__'):
                print(f"DEBUG: First item dict: {first_item.__dict__}", file=sys.stderr)

    print(f"DEBUG: Extracted text lines: {all_text}", file=sys.stderr)

    return extract_police_details(all_text) if all_text else {'id_number': None, 'full_name': None, 'address': None, 'birth_date': None, 'birth_place': None, 'citizenship': None, 'gender': None, 'status': None, 'success': False}
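
# Hypothetical illustration (values invented) of the two result shapes the
# parser above probes. Older PaddleOCR versions return nested
# [box, (text, score)] lists; newer PaddleX pipelines return a dict-like
# OCRResult whose 'rec_texts' key already holds plain strings.
_OLD_STYLE_RESULT = [[
    [[[0, 0], [100, 0], [100, 20], [0, 20]], ('NO RECORD ON FILE', 0.98)],
]]
_NEW_STYLE_RESULT = {
    'rec_texts': ['NAME', ': DELA CRUZ, JUAN SANTOS', 'NO RECORD ON FILE'],
    'rec_scores': [0.97, 0.95, 0.98],
}
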
def extract_ocr_lines_simple(image_path):
    # Fallback method with advanced features (matching the NBI script's fallback)
    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=True,
            use_doc_unwarping=True,
            use_textline_orientation=True,
            lang='en'
        )
        results = ocr.ocr(image_path)

    # Debug: print the raw results structure for the fallback method
    print(f"DEBUG (fallback): Raw OCR results type: {type(results)}", file=sys.stderr)
    print(f"DEBUG (fallback): Raw OCR results is None: {results is None}", file=sys.stderr)
    if results is not None:
        print(f"DEBUG (fallback): Raw OCR results length: {len(results) if isinstance(results, list) else 'N/A'}", file=sys.stderr)
        if isinstance(results, list) and len(results) > 0:
            print(f"DEBUG (fallback): First level item type: {type(results[0])}", file=sys.stderr)
            if isinstance(results[0], list) and len(results[0]) > 0:
                print(f"DEBUG (fallback): Second level first item: {str(results[0][0])[:200] if results[0][0] else 'None'}", file=sys.stderr)

    all_text = []
    try:
        # Check if the results contain OCRResult objects (new PaddleX format)
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            # Check whether it is an OCRResult object by type name
            item_type_name = type(first_item).__name__
            is_ocr_result = 'OCRResult' in item_type_name or 'ocr_result' in str(type(first_item)).lower()

            if is_ocr_result:
                print(f"DEBUG (fallback): Detected OCRResult object format (type: {item_type_name})", file=sys.stderr)
                # Inspect attributes
                attrs = dir(first_item)
                print(f"DEBUG (fallback): OCRResult attributes: {[a for a in attrs if not a.startswith('_')]}", file=sys.stderr)

                for ocr_result in results:
                    # Try various possible attribute names for the text
                    text_found = False

                    # First, try accessing it as a dictionary (OCRResult is dict-like)
                    try:
                        if hasattr(ocr_result, 'keys'):
                            ocr_dict = dict(ocr_result)
                            print(f"DEBUG (fallback): OCRResult as dict keys: {list(ocr_dict.keys())}", file=sys.stderr)

                            # Look for common OCR result keys ('rec_texts' is the actual key in a PaddleX OCRResult)
                            for key in ['rec_texts', 'rec_text', 'dt_polys', 'ocr_text', 'text', 'texts', 'result', 'results', 'ocr_result', 'dt_boxes']:
                                if key in ocr_dict:
                                    val = ocr_dict[key]
                                    print(f"DEBUG (fallback): Found key '{key}': {type(val)}, length: {len(val) if isinstance(val, list) else 'N/A'}", file=sys.stderr)
                                    if isinstance(val, list):
                                        # 'rec_texts' is a list of strings directly
                                        if key == 'rec_texts':
                                            for text_item in val:
                                                if isinstance(text_item, str) and text_item.strip():
                                                    all_text.append(text_item.strip())
                                                elif text_item:
                                                    all_text.append(str(text_item))
                                            if val:
                                                text_found = True
                                        else:
                                            # For other keys, try to extract text from nested structures
                                            for item in val:
                                                if isinstance(item, (list, tuple)) and len(item) >= 2:
                                                    # Format: [[coords], (text, confidence)]
                                                    text_part = item[1]
                                                    if isinstance(text_part, (list, tuple)) and len(text_part) >= 1:
                                                        all_text.append(str(text_part[0]))
                                                elif isinstance(item, str):
                                                    all_text.append(item)
                                            if val:
                                                text_found = True
                                    elif isinstance(val, str) and val:
                                        all_text.append(val)
                                        text_found = True
                                    if text_found:
                                        break
                    except Exception as e:
                        print(f"DEBUG (fallback): Error accessing OCRResult as dict: {e}", file=sys.stderr)

                    # Try the json() method
                    if not text_found:
                        try:
                            if hasattr(ocr_result, 'json'):
                                json_data = ocr_result.json()
                                print(f"DEBUG (fallback): OCRResult.json() type: {type(json_data)}", file=sys.stderr)
                                if isinstance(json_data, dict):
                                    print(f"DEBUG (fallback): OCRResult.json() keys: {list(json_data.keys())}", file=sys.stderr)
                                    # Look for text in the JSON ('rec_texts' is the actual key)
                                    for key in ['rec_texts', 'rec_text', 'dt_polys', 'ocr_text', 'text', 'texts', 'result', 'results']:
                                        if key in json_data:
                                            val = json_data[key]
                                            if isinstance(val, list):
                                                # 'rec_texts' is a list of strings directly
                                                if key == 'rec_texts':
                                                    for text_item in val:
                                                        if isinstance(text_item, str) and text_item.strip():
                                                            all_text.append(text_item.strip())
                                                        elif text_item:
                                                            all_text.append(str(text_item))
                                                    if val:
                                                        text_found = True
                                                else:
                                                    for item in val:
                                                        if isinstance(item, (list, tuple)) and len(item) >= 2:
                                                            text_part = item[1]
                                                            if isinstance(text_part, (list, tuple)) and len(text_part) >= 1:
                                                                all_text.append(str(text_part[0]))
                                                        elif isinstance(item, str):
                                                            all_text.append(item)
                                                    if val:
                                                        text_found = True
                                            elif isinstance(val, str) and val:
                                                all_text.append(val)
                                                text_found = True
                                            if text_found:
                                                break
                        except Exception as e:
                            print(f"DEBUG (fallback): Error calling json(): {e}", file=sys.stderr)

                    # Try the rec_text attribute
                    if not text_found and hasattr(ocr_result, 'rec_text'):
                        rec_text = ocr_result.rec_text
                        print(f"DEBUG (fallback): Found rec_text attribute: {type(rec_text)}", file=sys.stderr)
                        if isinstance(rec_text, list):
                            all_text.extend([str(t) for t in rec_text if t])
                            text_found = True
                        elif rec_text:
                            all_text.append(str(rec_text))
                            text_found = True

                    # Try the text attribute
                    if not text_found and hasattr(ocr_result, 'text'):
                        text = ocr_result.text
                        print(f"DEBUG (fallback): Found text attribute: {type(text)}", file=sys.stderr)
                        if isinstance(text, list):
                            all_text.extend([str(t) for t in text if t])
                            text_found = True
                        elif text:
                            all_text.append(str(text))
                            text_found = True

                    # If there is still no text, print the full structure for debugging
                    if not text_found:
                        print(f"DEBUG (fallback): Could not find text in OCRResult, trying to inspect structure", file=sys.stderr)
                        try:
                            print(f"DEBUG (fallback): OCRResult repr: {repr(ocr_result)[:500]}", file=sys.stderr)
                            # Try to get all keys/items
                            if hasattr(ocr_result, 'keys'):
                                try:
                                    all_keys = list(ocr_result.keys())
                                    print(f"DEBUG (fallback): All OCRResult keys: {all_keys}", file=sys.stderr)
                                    for key in all_keys:
                                        try:
                                            val = ocr_result[key]
                                            print(f"DEBUG (fallback): Key '{key}' type: {type(val)}, value preview: {str(val)[:100]}", file=sys.stderr)
                                        except Exception:
                                            pass
                                except Exception:
                                    pass
                        except Exception as e:
                            print(f"DEBUG (fallback): Error inspecting structure: {e}", file=sys.stderr)
            else:
                # Old format - list of lists
                lines = results[0] if results and isinstance(results[0], list) else results
                print(f"DEBUG (fallback): Processing lines (old format), count: {len(lines) if isinstance(lines, list) else 'N/A'}", file=sys.stderr)
                for item in lines:
                    if isinstance(item, (list, tuple)) and len(item) >= 2:
                        meta = item[1]
                        if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                            all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG (fallback): Error processing OCR results: {str(e)}", file=sys.stderr)
        import traceback
        print(f"DEBUG (fallback): Traceback: {traceback.format_exc()}", file=sys.stderr)
        # Try to inspect the object attributes
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            print(f"DEBUG (fallback): First item attributes: {dir(first_item)}", file=sys.stderr)
            if hasattr(first_item, '__dict__'):
                print(f"DEBUG (fallback): First item dict: {first_item.__dict__}", file=sys.stderr)

    print(f"DEBUG (fallback): Extracted text lines: {all_text}", file=sys.stderr)

    return extract_police_details(all_text) if all_text else {'id_number': None, 'full_name': None, 'address': None, 'birth_date': None, 'birth_place': None, 'citizenship': None, 'gender': None, 'status': None, 'success': False}
# Main Execution
if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"success": False, "error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
print(f"DEBUG: Processing Police Clearance image URL: {image_url}", file=sys.stderr)

try:
    image_path = download_image(image_url, 'temp_police_image.jpg')
    print(f"DEBUG: Image downloaded to: {image_path}", file=sys.stderr)

    # Try the original OCR method first
    ocr_results = extract_ocr_lines(image_path)
    print(f"DEBUG: OCR results from extract_ocr_lines: {ocr_results}", file=sys.stderr)

    # If the original method fails, try the simple method with advanced features
    if not ocr_results['success']:
        print("DEBUG: Original method failed, trying simple method with advanced features", file=sys.stderr)
        ocr_results = extract_ocr_lines_simple(image_path)
        print(f"DEBUG: OCR results from extract_ocr_lines_simple: {ocr_results}", file=sys.stderr)

    # Clean up
    if os.path.exists(image_path):
        os.remove(image_path)

    response = {
        "success": ocr_results['success'],
        "data": ocr_results
    }

    sys.stdout = original_stdout
    sys.stdout.write(json.dumps(response))
    sys.stdout.flush()

except Exception as e:
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": False, "error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    try:
        if os.path.exists('temp_police_image.jpg'):
            os.remove('temp_police_image.jpg')
    except Exception:
        pass
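# Hypothetical caller sketch (script name and URL are examples): only stdout
# carries the JSON contract; all DEBUG and PaddleOCR noise goes to stderr.
#
#     import json, subprocess
#     proc = subprocess.run(
#         ['python', 'extract_police_ocr.py', 'https://example.com/clearance.jpg'],
#         capture_output=True, text=True,
#     )
#     result = json.loads(proc.stdout)   # {'success': ..., 'data': {...}}
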
extract_postal.py
ADDED
@@ -0,0 +1,419 @@
import sys, json, os, glob, requests
import re
import time
from contextlib import redirect_stdout, redirect_stderr
from datetime import datetime

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def download_image(url, output_path='temp_postal_image.jpg'):
    # Remove any existing temp file
    if os.path.exists(output_path):
        os.remove(output_path)

    # Add a cache-busting parameter
    timestamp = int(time.time())
    if '?' in url:
        url += f'&t={timestamp}'
    else:
        url += f'?t={timestamp}'

    # Add headers to prevent caching
    headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': '0'
    }

    response = requests.get(url, headers=headers)
    response.raise_for_status()
    image_data = response.content

    # Save the image
    with open(output_path, 'wb') as f:
        f.write(image_data)

    return output_path

def format_date(date_str):
    """Format a date from various formats to YYYY-MM-DD"""
    if not date_str:
        return None

    date_str = date_str.strip()

    # Fix common OCR errors first
    date_str = date_str.replace('Ol', '01').replace('O1', '01').replace('O0', '00').replace('OO', '00')
    # Lowercase L -> 1, but only next to digits so month names like 'Jul' survive
    date_str = re.sub(r'(?<=\d)l|l(?=\d)', '1', date_str)

    # Handle formats like "14 Aug 88" or "14 Aug88" -> "1988-08-14";
    # allow for a missing space between month and year, and let the month
    # token run longer than three letters so OCR-garbled names can be repaired
    match = re.match(r'(\d{1,2})\s*([A-Za-z]{3,})\s*(\d{2,4})', date_str)
    if match:
        day, month_str, year = match.groups()
        try:
            # Fix month OCR errors
            month_str = month_str.replace('Augu', 'Aug')
            month_str = month_str.replace('Decm', 'Dec').replace('Dece', 'Dec')
            month_str = month_str.replace('Janu', 'Jan').replace('Febr', 'Feb')
            month_str = month_str.replace('Marc', 'Mar').replace('Apil', 'Apr')
            month_str = month_str.replace('June', 'Jun')
            month_str = month_str.replace('July', 'Jul').replace('Sept', 'Sep')
            month_str = month_str.replace('Octo', 'Oct').replace('Novem', 'Nov')

            # Convert a 2-digit year to 4 digits (assume 1900s for years > 50, 2000s otherwise)
            if len(year) == 2:
                year_int = int(year)
                year = f"19{year}" if year_int > 50 else f"20{year}"

            # Parse the month abbreviation (use the first 3 chars)
            month = datetime.strptime(month_str[:3], '%b').month
            return f"{year}-{month:02d}-{int(day):02d}"
        except Exception as e:
            print(f"DEBUG: Date parsing error: {e}", file=sys.stderr)

    # Try other common formats
    for fmt in ["%d %b %Y", "%d %B %Y", "%Y-%m-%d", "%m/%d/%Y", "%d/%m/%Y", "%d%b%Y", "%d%B%Y"]:
        try:
            dt = datetime.strptime(date_str, fmt)
            return dt.strftime("%Y-%m-%d")
        except Exception:
            continue

    return date_str

def format_name(name):
    """Format a name: capitalize properly"""
    if not name:
        return None

    # Remove extra spaces and normalize
    name = ' '.join(name.split())

    # Capitalize each word properly
    name = ' '.join([word.capitalize() for word in name.split()])

    return name.strip()

def format_address(address_lines):
    """Format an address from multiple lines"""
    if not address_lines:
        return None

    # Join the address lines and clean up
    address = ' '.join([line.strip() for line in address_lines if line.strip()])

    # Fix missing spaces: "585Gen." -> "585 Gen."
    address = re.sub(r'(\d+)([A-Z])', r'\1 \2', address)

    # Fix missing spaces before abbreviations: "Brgy.Rivera" -> "Brgy. Rivera"
    address = re.sub(r'([a-z])([A-Z])', r'\1 \2', address)

    # Remove extra spaces
    address = ' '.join(address.split())

    return address.strip()
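
# Hypothetical worked examples (inputs invented, never called) for format_date
# above: the two-digit-year pivot is 50, so '88' becomes 1988 while '17'
# becomes 2017, and the missing space in 'Aug88' is tolerated by the \s* in
# the regex.
def _demo_format_date():
    assert format_date('14 Aug 88') == '1988-08-14'
    assert format_date('14 Aug88') == '1988-08-14'
    assert format_date('01 Dec 17') == '2017-12-01'
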
def extract_postal_details(lines):
    details = {
        'prn': None,
        'full_name': None,
        'address': None,
        'birth_date': None,
        'nationality': None,
        'issuing_post_office': None,
        'valid_until': None,
        'success': False
    }

    # Clean the lines - convert to strings and strip
    cleaned_lines = [str(line).strip() for line in lines if str(line).strip()]

    for i, line in enumerate(cleaned_lines):
        line_upper = line.upper().strip()
        line_stripped = line.strip()

        # Extract PRN (Postal Registration Number)
        # Format: "PRN 100141234567 P POSTAL", "PRN100141234567P", or "PAN100141234567P" (OCR might misread PRN as PAN)
        if not details['prn']:
            # Look for PRN followed by digits (may have "P POSTAL" after)
            prn_match = re.search(r'PRN\s*(\d{10,15})', line_upper)
            if prn_match:
                details['prn'] = prn_match.group(1)
            else:
                # Also check for PAN (a common OCR error where PRN is misread as PAN)
                pan_match = re.search(r'PAN\s*(\d{10,15})', line_upper)
                if pan_match:
                    details['prn'] = pan_match.group(1)

        # Extract Full Name - combine separate name parts
        # Look for the label "First Name Middle Name Surname, Suffix" or bare name parts
        if not details['full_name']:
            # Check if this line is the label
            if ("FIRST NAME" in line_upper or "FINT NAME" in line_upper) and ("SURNAME" in line_upper or "SUMAME" in line_upper):
                # Collect name parts from the next few lines
                name_parts = []
                for j in range(1, min(5, len(cleaned_lines) - i)):
                    next_line = cleaned_lines[i+j].strip()
                    next_upper = next_line.upper()
                    # Stop if we hit an address or another label
                    if any(label in next_upper for label in ['ADDRESS', 'DATE', 'BIRTH', 'NATIONALITY', 'ISSUING', 'VALID', 'GEN', 'TUAZON', 'BLVD', 'BRGY', '585', 'PASAY']):
                        break
                    # Add if it looks like a name part (all caps, letters and spaces only, not too short)
                    if next_line and re.match(r'^[A-Z\s,]+$', next_line) and len(next_line) > 1:
                        # Skip if it's clearly not a name (like "ID", "C", etc.)
                        if next_line not in ['ID', 'C', 'P', 'POSTAL']:
                            name_parts.append(next_line)
                if name_parts:
                    details['full_name'] = ' '.join(name_parts)
            # Also check if the line itself is a name part (all caps, not a label)
            elif re.match(r'^[A-Z\s,]+$', line_stripped) and len(line_stripped) > 2:
                # Make sure it's not a label or common words
                if not any(label in line_upper for label in ['FIRST NAME', 'MIDDLE NAME', 'SURNAME', 'ADDRESS', 'DATE', 'BIRTH', 'NATIONALITY', 'ISSUING', 'VALID', 'POSTAL', 'IDENTITY', 'CARD', 'PHCPOST', 'PHILIPPINE', 'PREMIUM']):
                    # Check if the previous line is the name label
                    if i > 0:
                        prev_line = cleaned_lines[i-1].strip().upper()
                        if "FIRST NAME" in prev_line or "FINT NAME" in prev_line or "SUMAME" in prev_line or "SURNAME" in prev_line:
                            # Collect consecutive name parts
                            name_parts = [line_stripped]
                            for j in range(1, min(4, len(cleaned_lines) - i)):
                                next_line = cleaned_lines[i+j].strip()
                                if (next_line and re.match(r'^[A-Z\s,]+$', next_line) and
                                        len(next_line) > 2 and
                                        not any(label in next_line.upper() for label in ['ADDRESS', 'DATE', 'BIRTH', 'GEN', 'TUAZON', 'BLVD', 'BRGY', '585', 'PASAY', 'ID', 'POSTAL', 'PREMIUM'])):
                                    name_parts.append(next_line)
                                else:
                                    break
                            if len(name_parts) >= 2:
                                details['full_name'] = ' '.join(name_parts)
                            elif len(name_parts) == 1 and len(name_parts[0].split()) >= 2:
                                details['full_name'] = name_parts[0]

        # Extract Address - look for address parts (street numbers, Gen., Blvd., Brgy., City)
        if not details['address']:
            # Look for address indicators
            if any(indicator in line_upper for indicator in ['GEN', 'TUAZON', 'BLVD', 'BRGY', 'PASAY', 'CITY']) or (re.match(r'^\d+', line_stripped) and len(line_stripped) > 2):
                address_lines = []
                # Step back one line in case we missed the start of the address
                start_idx = max(0, i - 1)

                # Collect address lines going forward
                for j in range(0, min(7, len(cleaned_lines) - start_idx)):
                    idx = start_idx + j
                    if idx >= len(cleaned_lines):
                        break
                    addr_line = cleaned_lines[idx].strip()
                    addr_upper = addr_line.upper()

                    # Stop if we hit a date, nationality, or another label
                    if any(label in addr_upper for label in ['DATE', 'BIRTH', 'NATIONALITY', 'FILIPINO', 'ISSUING', 'VALID', 'PAN', 'NOCON']):
                        break

                    # Skip very short lines that are likely OCR noise (like "101", "o00")
                    if len(addr_line) <= 2 and not re.match(r'^\d+$', addr_line):
                        continue

                    # Add if it looks like address content
                    if addr_line and len(addr_line) > 1:
                        # Check if it's a number, street name, barangay, city, etc.
                        if (re.match(r'^\d+', addr_line) or
                                any(indicator in addr_upper for indicator in ['GEN', 'TUAZON', 'BLVD', 'BRGY', 'PASAY', 'CITY', 'STREET', 'AVE', 'BOULEVARD']) or
                                len(address_lines) > 0):  # continue once we've started collecting
                            # Skip obvious OCR errors like "o00"
                            if addr_line.lower() not in ['o00', 'o0', '00']:
                                address_lines.append(addr_line)

                if address_lines:
                    details['address'] = format_address(address_lines)

        # Extract Date of Birth - handle OCR errors
        if not details['birth_date']:
            # Look for date patterns: "14 Aug88" or "14 Aug 88"
            date_match = re.search(r'(\d{1,2})\s*([A-Za-z]{3})\s*(\d{2,4})', line_stripped)
            if date_match:
                # Make sure it's not the valid-until date
                if "VALID" not in line_upper and "UNTIL" not in line_upper:
                    # Fix the spacing
                    day, month, year = date_match.groups()
                    details['birth_date'] = f"{day} {month} {year}"

        # Extract Nationality
        if not details['nationality']:
            if "NATIONALITY" in line_upper or line_upper == "FILIPINO":
                if line_upper == "FILIPINO":
                    details['nationality'] = "Filipino"
                elif i + 1 < len(cleaned_lines):
                    next_line = cleaned_lines[i+1].strip()
                    if next_line and len(next_line) < 20:
                        details['nationality'] = next_line

        # Extract Issuing Post Office - handle OCR errors like "IssungPostOmce"
        if not details['issuing_post_office']:
            if ("ISSUING POST OFFICE" in line_upper or "ISSUING POST" in line_upper or
                    "ISSUINGPOST" in line_upper or "ISSUINGPOSTOMCE" in line_upper):
                if i + 1 < len(cleaned_lines):
                    next_line = cleaned_lines[i+1].strip()
                    if next_line and len(next_line) < 20:
                        # Fix OCR errors: MNL.QE -> MNL-QE
                        next_line = next_line.replace('.', '-')
                        details['issuing_post_office'] = next_line

        # Extract Valid Until - handle OCR errors like "Vald Urt" and "OlDec17"
        if not details['valid_until']:
            if ("VALID UNTIL" in line_upper or "VALIDUNTIL" in line_upper or
                    "VALD URT" in line_upper or "VALDURT" in line_upper):
                if i + 1 < len(cleaned_lines):
                    next_line = cleaned_lines[i+1].strip()
                    # Fix OCR errors: OlDec17 -> 01 Dec 17
                    next_line = next_line.replace('Ol', '01').replace('O1', '01')
                    next_line = next_line.replace('O0', '00').replace('OO', '00')
                    # Try to extract a date pattern
                    date_match = re.search(r'(\d{1,2})\s*([A-Za-z]{3})\s*(\d{2,4})', next_line)
                    if date_match:
                        day, month, year = date_match.groups()
                        details['valid_until'] = f"{day} {month} {year}"
                    elif next_line:
                        details['valid_until'] = next_line

    # Format the extracted fields
    if details['full_name']:
        details['full_name'] = format_name(details['full_name'])
    if details['birth_date']:
        details['birth_date'] = format_date(details['birth_date'])
    if details['valid_until']:
        details['valid_until'] = format_date(details['valid_until'])

    if details['prn'] or details['full_name']:
        details['success'] = True

    return details
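
# Hypothetical usage sketch (lines invented, never called): exercises the
# PAN-misread fallback for the PRN, the name-label lookahead, and the
# nationality label handling in extract_postal_details above.
def _demo_extract_postal_details():
    sample = [
        'POSTAL IDENTITY CARD',
        'PAN 100141234567 P POSTAL',   # PRN misread as PAN by the OCR
        'First Name Middle Name Surname, Suffix',
        'JUAN',
        'SANTOS',
        'DELA CRUZ',
        'Nationality',
        'FILIPINO',
    ]
    return extract_postal_details(sample)
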
def extract_ocr_lines(image_path):
    # Check if the file exists
    if not os.path.exists(image_path):
        return {'success': False, 'error': 'File not found'}

    file_size = os.path.getsize(image_path)
    print(f"DEBUG: Image file size: {file_size} bytes", file=sys.stderr)

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        # Try the simple configuration first
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en'
        )
        try:
            results = ocr.ocr(image_path)
        except Exception as e:
            print(f"DEBUG: ocr() failed: {e}, trying predict()", file=sys.stderr)
            if hasattr(ocr, 'predict'):
                results = ocr.predict(image_path)
            else:
                results = None

    # Debug: print the raw results structure
    print(f"DEBUG: Raw OCR results type: {type(results)}", file=sys.stderr)

    all_text = []
    try:
        # Handle both the old format (list) and the new format (OCRResult object)
        if results and isinstance(results, list) and len(results) > 0:
            first_item = results[0]
            item_type_name = type(first_item).__name__
            is_ocr_result = 'OCRResult' in item_type_name or 'ocr_result' in str(type(first_item)).lower()

            if is_ocr_result:
                print(f"DEBUG: Detected OCRResult object format (type: {item_type_name})", file=sys.stderr)
                # Access the OCRResult as a dictionary
                try:
                    if hasattr(first_item, 'keys'):
                        ocr_dict = dict(first_item)
                        # Look for the rec_texts key
                        if 'rec_texts' in ocr_dict:
                            rec_texts = ocr_dict['rec_texts']
                            if isinstance(rec_texts, list):
                                all_text = [str(t) for t in rec_texts if t]
                                print(f"DEBUG: Extracted {len(all_text)} text lines from rec_texts", file=sys.stderr)
                except Exception as e:
                    print(f"DEBUG: Error accessing OCRResult: {e}", file=sys.stderr)
            else:
                # Old format - list of lists
                lines = results[0] if results and isinstance(results[0], list) else results
                for item in lines:
                    if isinstance(item, (list, tuple)) and len(item) >= 2:
                        meta = item[1]
                        if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                            all_text.append(str(meta[0]))
    except Exception as e:
        print(f"DEBUG: Error processing OCR results: {str(e)}", file=sys.stderr)
        import traceback
        print(f"DEBUG: Traceback: {traceback.format_exc()}", file=sys.stderr)

    print(f"DEBUG: Extracted text lines: {all_text}", file=sys.stderr)

    return extract_postal_details(all_text) if all_text else {
        'prn': None,
        'full_name': None,
        'address': None,
        'birth_date': None,
        'nationality': None,
        'issuing_post_office': None,
        'valid_until': None,
        'success': False
    }

# Main Execution
if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"success": False, "error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
print(f"DEBUG: Processing Postal ID image URL: {image_url}", file=sys.stderr)

try:
    image_path = download_image(image_url, 'temp_postal_image.jpg')
    print(f"DEBUG: Image downloaded to: {image_path}", file=sys.stderr)

    ocr_results = extract_ocr_lines(image_path)
    print(f"DEBUG: OCR results: {ocr_results}", file=sys.stderr)

    # Clean up
    if os.path.exists(image_path):
        os.remove(image_path)

    response = {
        "success": ocr_results['success'],
        "data": ocr_results
    }

    sys.stdout = original_stdout
    sys.stdout.write(json.dumps(response))
    sys.stdout.flush()

except Exception as e:
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": False, "error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    try:
        if os.path.exists('temp_postal_image.jpg'):
            os.remove('temp_postal_image.jpg')
    except Exception:
        pass
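# Hypothetical stdout on success (field values invented), matching the
# response envelope assembled above:
#
#     {"success": true, "data": {"prn": "100141234567",
#      "full_name": "Juan Santos Dela Cruz", "address": "585 Gen. Tuazon Blvd",
#      "birth_date": "1988-08-14", "nationality": "FILIPINO",
#      "issuing_post_office": "MNL-QE", "valid_until": "2017-12-01",
#      "success": true}}
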
extract_prc.py
ADDED
@@ -0,0 +1,377 @@
#!/usr/bin/env python3
"""
Philippine PRC (Professional Regulation Commission) License Information Extraction Script

Purpose:
    Extracts structured information from PRC license images using OCR.
    Handles various PRC license formats including UMID-style cards.

Why this script exists:
- PRC licenses have complex layouts with multiple information fields
- Need to extract profession-specific information
- Handles both traditional PRC licenses and UMID-style PRC cards
- Required for professional verification workflows

Key Features:
- Extracts CRN (Common Reference Number) - 12-digit format
- Processes registration numbers and dates
- Extracts profession information
- Handles GSIS/SSS number extraction
- Supports validity date tracking

Dependencies:
- PaddleOCR: High-accuracy OCR engine (https://github.com/PaddlePaddle/PaddleOCR)
- Pillow (PIL): Image processing (https://pillow.readthedocs.io/)
- requests: HTTP library (https://docs.python-requests.org/)

Usage:
    python extract_prc.py "https://example.com/prc_license.jpg"

Output:
    JSON with extracted information: crn, registration_number, profession, valid_until, etc.
"""

import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR
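
# Hypothetical output shape for the usage in the module docstring above
# (field values invented; see the "Output" note at the top of this file):
#
#     {"crn": "011122223333", "registration_number": "0012345",
#      "registration_date": "2015-06-19", "profession": "Occupational Therapist",
#      "valid_until": "2026-06-18", ...}
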
def dprint(msg, obj=None):
    """
    Debug print function that safely handles object serialization.

    Args:
        msg (str): Debug message
        obj (any): Object to print (optional)

    Why this approach:
    - Centralized debug logging
    - Safe object serialization
    - Consistent debug output format
    """
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    cache_files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in cache_files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        # Flatten transparency onto a white background before saving as JPEG
        bg = Image.new('RGB', img.size, (255, 255, 255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def format_date(s):
    if not s:
        return None
    raw = s.strip()
    t = raw.replace(' ', '').replace('\\', '/').replace('.', '/')
    if re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t):
        return t.replace('/', '-')
    # Accept mm/dd/yyyy style
    if re.match(r'^\d{2}/\d{2}/\d{4}$', raw):
        m, d, y = raw.split('/')
        return f"{y}-{int(m):02d}-{int(d):02d}"
    # Month-name variants
    m = re.match(r'([A-Za-z]+)\s*\d{1,2},\s*\d{4}', raw)
    if m:
        try:
            return datetime.strptime(raw.replace('  ', ' '), "%B %d, %Y").strftime("%Y-%m-%d")
        except Exception:
            try:
                return datetime.strptime(raw.replace('  ', ' '), "%b %d, %Y").strftime("%Y-%m-%d")
            except Exception:
                pass
    return raw

def cap_words(name):
    return None if not name else ' '.join(w.capitalize() for w in name.split())

def normalize_name_from_parts(last, first_block):
    last = (last or '').strip()
    tokens = [t for t in (first_block or '').strip().split(' ') if t]
    given_kept = tokens[:2]  # keep up to two given names
    composed = ' '.join(given_kept + [last]).strip()
    return cap_words(composed) if composed else None

def normalize_full_name_from_three(first, middle, last):
    # Keep the first + optional second token from the "first" block; ignore middle completely
    tokens = [t for t in (first or '').strip().split(' ') if t]
    given_kept = tokens[:2]
    composed = ' '.join(given_kept + [last or '']).strip()
    return cap_words(composed) if composed else None

def take_within(lines, i, k=5):
    out = []
    for j in range(1, k+1):
        if i+j < len(lines):
            t = str(lines[i+j]).strip()
            if t:
                out.append(t)
    return out
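
# Hypothetical example (values invented, never called): take_within gathers
# up to k non-empty lines after index i, which is how the label handlers in
# extract_prc_info below peek at the values printed under a field label.
def _demo_take_within():
    labels = ['Registration', 'No.', '0012345', '', 'Date']
    return take_within(labels, 1, k=3)  # -> ['0012345', 'Date']
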
def is_numeric_id(t):
    return bool(re.match(r'^\d{5,}$', str(t).replace(' ', '')))

def is_crn(t):
    # A UMID CRN is commonly 12 digits
    return bool(re.match(r'^\d{12}$', t.replace(' ', '')))

def is_date(t):
    t1 = t.replace(' ', '').replace('\\', '/').replace('.', '/')
    return bool(re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t1)) or bool(re.match(r'^\d{2}/\d{2}/\d{4}$', t)) or bool(re.match(r'^[A-Za-z]+\s*\d{1,2},\s*\d{4}$', t))
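
# Hypothetical examples (values invented, never called) for the validators
# above: is_crn accepts exactly 12 digits with spaces ignored, and is_date
# accepts ISO, mm/dd/yyyy, and "Month d, yyyy" shapes.
def _demo_prc_validators():
    assert is_crn('0111 2222 3333')
    assert not is_crn('12345')
    assert is_date('1991-07-03')
    assert is_date('07/03/1991')
    assert is_date('July 3, 1991')
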
def extract_prc_info(lines):
    """
    Extract PRC license information from OCR text lines.

    Args:
        lines (list): List of text lines from OCR processing

    Returns:
        dict: Extracted PRC information with keys: crn, registration_number, profession, etc.

    Why this approach:
    - PRC licenses have complex layouts with multiple fields
    - Need to handle various license formats (traditional and UMID-style)
    - Extracts profession-specific information
    - Handles both traditional PRC licenses and UMID-style PRC cards
    - Uses lookahead pattern matching for field extraction
    """
    dprint("Lines to extract", lines)

    # Initialize variables for the extracted information
    crn = None
    full_name = None
    birth_date = None
    gsis_number = None
    sss_number = None
    registration_number = None
    registration_date = None
    valid_until = None
    profession = None

    # Collect name parts separately for composition
    last_name_txt = None
    first_name_txt = None

    L = [str(x or '').strip() for x in lines]
    i = 0
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        # Extract CRN (UMID format) - 12 digits
        if crn is None and is_crn(line):
            crn = line.replace(' ', '')
            dprint("Found CRN", crn)

        # Extract Last Name using a lookahead pattern
        if 'last name' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                tl = t.lower()
                if not any(k in tl for k in ['first', 'middle', 'registration', 'valid', 'date', 'no']):
                    last_name_txt = t
                    break

        # Extract First Name
        if 'firstname' in low or 'first name' in low:
            if i+1 < len(L):
                first_name_txt = L[i+1]

        # Extract Date of Birth
        if ('date of birth' in low) or ('birth' in low and 'date' in low):
            ahead = take_within(L, i, 4)
            for t in ahead:
                if is_date(t):
                    birth_date = format_date(t)
                    break

        # Extract Registration Number - handles split labels
        if low == 'registration' and i+1 < len(L) and L[i+1].lower() in ('no', 'no.', 'number'):
            ahead = take_within(L, i+1, 4)
            for t in ahead:
                if is_numeric_id(t):
                    registration_number = t.replace(' ', '')
                    break

        # Also handle fused label forms
        if ('registration no' in low) or ('registration number' in low):
            ahead = take_within(L, i, 4)
            for t in ahead:
                if is_numeric_id(t):
                    registration_number = t.replace(' ', '')
                    break

        # Extract Registration Date
        if low == 'registration' and i+1 < len(L) and L[i+1].lower() == 'date':
            ahead = take_within(L, i+1, 4)
            for t in ahead:
                if is_date(t):
                    registration_date = format_date(t)
                    break
        if 'registration date' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if is_date(t):
                    registration_date = format_date(t)
                    break

        # Extract Valid Until Date
        if 'valid until' in low or 'validity' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if is_date(t):
                    valid_until = format_date(t)
                    break

        # Extract Profession from bold lines
        if any(k in low for k in ['occupational', 'technician', 'engineer', 'teacher', 'nurse']):
            if len(line.split()) >= 2:
                profession = cap_words(line)
                dprint("Found profession", profession)
+
# Extract SSS Number
|
| 270 |
+
if sss_number is None and ('sss' in low or 'social security' in low):
|
| 271 |
+
ahead = take_within(L, i, 3)
|
| 272 |
+
for t in ahead:
|
| 273 |
+
if is_numeric_id(t):
|
| 274 |
+
sss_number = t.replace(' ', '')
|
| 275 |
+
dprint("Found sss_number", sss_number)
|
| 276 |
+
break
|
| 277 |
+
|
| 278 |
+
# Extract GSIS Number
|
| 279 |
+
if gsis_number is None and ('gsis' in low):
|
| 280 |
+
ahead = take_within(L, i, 3)
|
| 281 |
+
for t in ahead:
|
| 282 |
+
if is_numeric_id(t):
|
| 283 |
+
gsis_number = t.replace(' ', '')
|
| 284 |
+
dprint("Found gsis_number", gsis_number)
|
| 285 |
+
break
|
| 286 |
+
|
| 287 |
+
i += 1
|
| 288 |
+
|
| 289 |
+
# Compose full name from parts
|
| 290 |
+
if full_name is None:
|
| 291 |
+
full_name = normalize_name_from_parts(last_name_txt, first_name_txt)
|
| 292 |
+
|
| 293 |
+
# Return structured result
|
| 294 |
+
result = {
|
| 295 |
+
"id_type": "prc",
|
| 296 |
+
"crn": crn,
|
| 297 |
+
"id_number": registration_number or crn, # Frontend expects id_number
|
| 298 |
+
"registration_number": registration_number,
|
| 299 |
+
"registration_date": registration_date,
|
| 300 |
+
"valid_until": valid_until,
|
| 301 |
+
"full_name": full_name,
|
| 302 |
+
"birth_date": birth_date,
|
| 303 |
+
"sss_number": sss_number,
|
| 304 |
+
"gsis_number": gsis_number,
|
| 305 |
+
"profession": profession
|
| 306 |
+
}
|
| 307 |
+
dprint("Final result", result)
|
| 308 |
+
return result
|
| 309 |
+
|
| 310 |
+
def extract_ocr_lines(image_path):
|
| 311 |
+
os.makedirs("output", exist_ok=True)
|
| 312 |
+
dprint("Initializing PaddleOCR")
|
| 313 |
+
|
| 314 |
+
with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
|
| 315 |
+
ocr = PaddleOCR(
|
| 316 |
+
use_doc_orientation_classify=False,
|
| 317 |
+
use_doc_unwarping=False,
|
| 318 |
+
use_textline_orientation=False,
|
| 319 |
+
lang='en',
|
| 320 |
+
show_log=False
|
| 321 |
+
)
|
| 322 |
+
dprint("OCR initialized")
|
| 323 |
+
dprint("Running OCR predict", image_path)
|
| 324 |
+
results = ocr.ocr(image_path, cls=False)
|
| 325 |
+
dprint("OCR predict done, results_count", len(results))
|
| 326 |
+
|
| 327 |
+
# Process OCR results directly
|
| 328 |
+
all_text = []
|
| 329 |
+
try:
|
| 330 |
+
lines = results[0] if results and isinstance(results[0], list) else results
|
| 331 |
+
for item in lines:
|
| 332 |
+
if isinstance(item, (list, tuple)) and len(item) >= 2:
|
| 333 |
+
meta = item[1]
|
| 334 |
+
if isinstance(meta, (list, tuple)) and len(meta) >= 1:
|
| 335 |
+
all_text.append(str(meta[0]))
|
| 336 |
+
except Exception as e:
|
| 337 |
+
dprint("Error processing OCR results", str(e))
|
| 338 |
+
|
| 339 |
+
dprint("All direct texts", all_text)
|
| 340 |
+
return extract_prc_info(all_text) if all_text else {
|
| 341 |
+
"id_type": "prc",
|
| 342 |
+
"crn": None,
|
| 343 |
+
"full_name": None,
|
| 344 |
+
"birth_date": None
|
| 345 |
+
}
|
| 346 |
+
|
| 347 |
+
if len(sys.argv) < 2:
|
| 348 |
+
sys.stdout = original_stdout
|
| 349 |
+
print(json.dumps({"error": "No image URL provided"}))
|
| 350 |
+
sys.exit(1)
|
| 351 |
+
|
| 352 |
+
image_url = sys.argv[1]
|
| 353 |
+
dprint("Processing image URL", image_url)
|
| 354 |
+
try:
|
| 355 |
+
image_path = download_image(image_url)
|
| 356 |
+
dprint("Image downloaded to", image_path)
|
| 357 |
+
ocr_results = extract_ocr_lines(image_path)
|
| 358 |
+
dprint("OCR results ready")
|
| 359 |
+
|
| 360 |
+
# Restore stdout and print only the JSON response
|
| 361 |
+
sys.stdout = original_stdout
|
| 362 |
+
sys.stdout.write(json.dumps({"success": True, "ocr_results": ocr_results}))
|
| 363 |
+
sys.stdout.flush()
|
| 364 |
+
|
| 365 |
+
except Exception as e:
|
| 366 |
+
dprint("Exception", str(e))
|
| 367 |
+
# Restore stdout for error JSON
|
| 368 |
+
sys.stdout = original_stdout
|
| 369 |
+
sys.stdout.write(json.dumps({"error": str(e)}))
|
| 370 |
+
sys.stdout.flush()
|
| 371 |
+
sys.exit(1)
|
| 372 |
+
finally:
|
| 373 |
+
# Clean up
|
| 374 |
+
try:
|
| 375 |
+
clean_cache()
|
| 376 |
+
except:
|
| 377 |
+
pass
|
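Every extractor in this upload follows the same CLI contract: one image-URL argument, all diagnostics on stderr, and exactly one JSON object on stdout, either {"success": true, "ocr_results": {...}} or {"error": "..."}. A minimal caller sketch, not part of the upload itself (the URL is a placeholder and the script is assumed to be run from the repo root):

import json
import subprocess

def run_extractor(script, image_url):
    # stdout carries only the final JSON; DEBUG lines go to stderr
    proc = subprocess.run(
        ["python", script, image_url],
        capture_output=True, text=True
    )
    return json.loads(proc.stdout)

result = run_extractor("extract_prc.py", "https://example.com/prc.jpg")
print(result.get("ocr_results", result))
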
extract_sss.py
ADDED
@@ -0,0 +1,216 @@
import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def dprint(msg, obj=None):
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        bg = Image.new('RGB', img.size, (255, 255, 255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def cap_words(s):
    return None if not s else ' '.join(w.capitalize() for w in s.split())

def normalize_full_name(s):
    if not s:
        return None
    raw = ' '.join(s.split())

    if ',' in raw:
        parts = [p.strip() for p in raw.split(',')]
        last = parts[0] if parts else ''
        given_block = ','.join(parts[1:]).strip() if len(parts) > 1 else ''
        tokens = [t for t in given_block.split(' ') if t]
        first = tokens[0] if tokens else ''
        second = tokens[1] if len(tokens) > 1 else ''
        if second:
            return f"{first} {second} {last}".strip()
        else:
            return f"{first} {last}".strip()
    else:
        tokens = [t for t in raw.split(' ') if t]
        if len(tokens) >= 3:
            return f"{tokens[0]} {tokens[1]} {tokens[-1]}".strip()
        elif len(tokens) == 2:
            return f"{tokens[0]} {tokens[1]}".strip()
        else:
            return raw

def extract_sss_number(text):
    sss_pattern = r'\b(\d{2}-\d{7}-\d{1})\b'
    match = re.search(sss_pattern, text)
    if match:
        return match.group(1)
    return None

def extract_sss_info(lines):
    dprint("Lines to extract", lines)

    full_name = None
    sss_number = None
    sss_id_number = None
    name_parts = []

    L = [str(x or '').strip() for x in lines]
    i = 0
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        # Look for SSS number pattern (XX-XXXXXXX-X)
        if not sss_number:
            sss_num = extract_sss_number(line)
            if sss_num:
                sss_number = sss_num
                sss_id_number = sss_num
                dprint("Found SSS number", sss_number)

        # Collect potential name parts (single words that look like names)
        if re.search(r'^[A-Z]{2,}$', line) and not re.search(r'[0-9]', line):
            skip_words = ['REPUBLIC', 'PHILIPPINES', 'SOCIAL', 'SECURITY', 'SYSTEM', 'SSS', 'PRESIDENT', 'PROUD', 'FILIPINO', 'CORAZON']
            if not any(word in line.upper() for word in skip_words):
                name_parts.append(line)
                dprint("Added name part", line)

        # Look for multi-word names but don't set full_name yet
        if len(line.split()) >= 2 and re.search(r'[A-Z]{2,}', line) and not re.search(r'[0-9]{3,}', line):
            skip_words = ['REPUBLIC', 'PHILIPPINES', 'SOCIAL', 'SECURITY', 'SYSTEM', 'SSS', 'PRESIDENT', 'PROUD', 'FILIPINO']
            if not any(word in line.upper() for word in skip_words):
                # Add multi-word names to name_parts instead of setting full_name directly
                name_parts.append(line)
                dprint("Added multi-word name part", line)

        i += 1

    # Now compose the full name from all collected parts
    if name_parts:
        # Combine all name parts
        combined_name = ' '.join(name_parts)
        full_name = normalize_full_name(combined_name)
        dprint("Composed name from all parts", {"parts": name_parts, "result": full_name})

    result = {
        "id_type": "sss",
        "sss_number": sss_number,
        "id_number": sss_id_number,
        "full_name": full_name,
        "birth_date": None,
        "address": None,
        "sex": None,
        "nationality": "Filipino"
    }
    dprint("Final result", result)
    return result

def extract_ocr_lines(image_path):
    os.makedirs("output", exist_ok=True)
    dprint("Initializing PaddleOCR")

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en',
            show_log=False
        )
        dprint("OCR initialized")
        dprint("Running OCR predict", image_path)
        results = ocr.ocr(image_path, cls=False)
        dprint("OCR predict done, results_count", len(results))

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        dprint("Error processing OCR results", str(e))

    dprint("All direct texts", all_text)
    return extract_sss_info(all_text) if all_text else {
        "id_type": "sss",
        "sss_number": None,
        "id_number": None,
        "full_name": None,
        "birth_date": None
    }

if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
dprint("Processing image URL", image_url)
try:
    image_path = download_image(image_url)
    dprint("Image downloaded to", image_path)
    ocr_results = extract_ocr_lines(image_path)
    dprint("OCR results ready")

    # Restore stdout and print only the JSON response
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": True, "ocr_results": ocr_results}))
    sys.stdout.flush()

except Exception as e:
    dprint("Exception", str(e))
    # Restore stdout for error JSON
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    # Clean up
    try:
        clean_cache()
    except:
        pass
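Because the SSS number match is anchored on the XX-XXXXXXX-X layout, the parsing logic can be dry-run with no OCR at all. A sketch with fabricated OCR lines (assumes extract_sss_info and its helpers are in scope, e.g. copied into a REPL, since importing the module itself triggers the CLI block):

# Hypothetical OCR lines for a dry run of extract_sss_info
sample_lines = [
    "REPUBLIC OF THE PHILIPPINES",   # skipped via skip_words
    "SOCIAL SECURITY SYSTEM",        # skipped via skip_words
    "DELA CRUZ",                     # collected as a multi-word name part
    "JUAN",                          # collected as a single-word name part
    "02-1234567-8",                  # fabricated number matching XX-XXXXXXX-X
]
info = extract_sss_info(sample_lines)
# info["sss_number"] == "02-1234567-8"
# info["full_name"] == "DELA CRUZ JUAN" (parts joined in OCR order;
# normalize_full_name keeps the original casing)
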
extract_tesda_ocr.py
ADDED
@@ -0,0 +1,331 @@
import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def dprint(msg, obj=None):
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    cache_files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in cache_files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        bg = Image.new('RGB', img.size, (255,255,255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def format_date(s):
    if not s: return None
    raw = s.strip()

    # Handle formats like "july 22,2022" (no space after comma)
    raw = raw.replace(',', ', ')

    t = raw.replace(' ', '').replace('\\','/').replace('.','/')
    if re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t):
        return t.replace('/', '-')
    # Accept mm/dd/yyyy style
    if re.match(r'^\d{2}/\d{2}/\d{4}$', raw):
        m, d, y = raw.split('/')
        return f"{y}-{int(m):02d}-{int(d):02d}"
    # Month name variants - try different formats
    date_formats = [
        "%B %d, %Y",  # July 22, 2022
        "%b %d, %Y",  # Jul 22, 2022
        "%B %d %Y",   # July 22 2022
        "%b %d %Y",   # Jul 22 2022
    ]

    for fmt in date_formats:
        try:
            return datetime.strptime(raw.replace('  ', ' '), fmt).strftime("%Y-%m-%d")
        except Exception:
            continue

    return raw

def cap_words(name):
    return None if not name else ' '.join(w.capitalize() for w in name.split())

def normalize_name_from_parts(last, first_block):
    last = (last or '').strip()
    tokens = [t for t in (first_block or '').strip().split(' ') if t]
    given_kept = tokens[:2]  # keep up to two given names
    composed = ' '.join(given_kept + [last]).strip()
    return cap_words(composed) if composed else None

def take_within(lines, i, k=5):
    out = []
    for j in range(1, k+1):
        if i+j < len(lines):
            t = str(lines[i+j]).strip()
            if t:
                out.append(t)
    return out

def extract_number_from_text(text):
    # Remove all non-digit characters and return the result
    return ''.join(c for c in text if c.isdigit())

def extract_tesda_info(lines):
    dprint("Lines to extract", lines)

    certificate_number = None
    uli_number = None
    cln_nq_number = None
    full_name = None

    # Collect name pieces
    last_name_txt = None
    first_name_txt = None

    # Initialize other variables
    issued_date = None
    valid_until = None
    qualification = None
    qualification_level = None

    L = [str(x or '').strip() for x in lines]
    i = 0
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        # Certificate Number - more flexible pattern matching
        if certificate_number is None:
            # Try different patterns
            cert_patterns = [
                r'certificate\s*no\.?\s*(\d{14})',
                r'certificate\s*number[:\s]*(\d{14})',
                r'cert\s*no\.?\s*(\d{14})',
                r'(\d{14})'
            ]

            for pattern in cert_patterns:
                match = re.search(pattern, low)
                if match:
                    certificate_number = match.group(1)
                    dprint("Found certificate number", certificate_number)
                    break

            # If not found in current line, check next lines
            if not certificate_number:
                ahead = take_within(L, i, 3)
                for t in ahead:
                    # Look for 14-digit number
                    nums = extract_number_from_text(t)
                    if len(nums) == 14:
                        certificate_number = nums
                        dprint("Found certificate number in next lines", certificate_number)
                        break

        # ULI Number
        if uli_number is None and ('uli' in low or 'ops-' in low):
            uli_pattern = r'(?:uli:?)?\s*([a-zA-Z]{3}-\d{2}-\d{3}-\d{5}-\d{3})'
            match = re.search(uli_pattern, low, re.IGNORECASE)
            if match:
                uli_number = match.group(1).upper()
                dprint("Found ULI number", uli_number)

        # CLN-NQ Number
        if cln_nq_number is None and ('cln' in low or 'nq' in low):
            cln_pattern = r'(?:cln-nq-?)?(\d{7})'
            match = re.search(cln_pattern, low)
            if match:
                cln_nq_number = match.group(1)
                dprint("Found CLN-NQ number", cln_nq_number)

        # Name appears after "is awarded to"
        if 'awarded to' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                if t and not any(k in t.lower() for k in ['awarded', 'certificate', 'valid', 'for having']):
                    # Clean up the name - remove periods and fix spacing
                    cleaned_name = t.replace('.', ' ').replace('  ', ' ').strip()
                    full_name = cap_words(cleaned_name)

                    # Try to split into components
                    parts = full_name.split()
                    if len(parts) >= 2:
                        last_name_txt = parts[-1]
                        first_name_txt = ' '.join(parts[:-1])
                    dprint("Found full name", full_name)
                    break

        # Qualification Level
        if qualification_level is None and ('national certificate' in low or 'nc' in low):
            qualification_level = cap_words(line)
            dprint("Found qualification level", qualification_level)

        # Qualification/Specialization
        if qualification is None and ('in' in low and len(line.split()) > 1):
            if i+1 < len(L):
                qualification = cap_words(L[i+1])
                dprint("Found qualification", qualification)

        # Issued Date
        if issued_date is None and ('issued' in low):
            date_pattern = r'(?:issued\s*(?:on|:)?\s*)?([A-Za-z]+\s+\d{1,2},?\s*\d{4})'
            match = re.search(date_pattern, low)
            if match:
                issued_date = format_date(match.group(1))
                dprint("Found issued date", issued_date)

        # Valid Until Date
        if valid_until is None and ('valid' in low):
            date_pattern = r'(?:valid\s*until\s*)?([A-Za-z]+\s+\d{1,2},?\s*\d{4})'
            match = re.search(date_pattern, low)
            if match:
                valid_until = format_date(match.group(1))
                dprint("Found valid until date", valid_until)

        i += 1

    # Compose name at the end
    if full_name is None:
        full_name = normalize_name_from_parts(last_name_txt, first_name_txt)

    # Get first and last 4 digits of certificate number if available
    cert_first_four = certificate_number[:4] if certificate_number else None
    cert_last_four = certificate_number[-4:] if certificate_number else None

    result = {
        "id_type": "tesda",
        "certificate_number": certificate_number,
        "cert_first_four": cert_first_four,
        "cert_last_four": cert_last_four,
        "uli_number": uli_number,
        "cln_nq_number": cln_nq_number,
        "full_name": full_name,
        "first_name": first_name_txt,
        "last_name": last_name_txt,
        "qualification_level": qualification_level,
        "qualification": qualification,
        "issued_date": issued_date,
        "valid_until": valid_until
    }
    dprint("Final result", result)
    return result

def extract_ocr_lines(image_path):
    os.makedirs("output", exist_ok=True)
    dprint("Initializing PaddleOCR")

    # Redirect both stdout and stderr during PaddleOCR operations
    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en',
            show_log=False
        )
        dprint("OCR initialized")
        dprint("Running OCR", image_path)
        results = ocr.ocr(image_path)

    dprint("OCR done, results_count", len(results))

    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        dprint("Error processing OCR results", str(e))

    dprint("All direct texts", all_text)
    return extract_tesda_info(all_text) if all_text else {
        "id_type": "tesda",
        "certificate_number": None,
        "cert_first_four": None,
        "cert_last_four": None,
        "uli_number": None,
        "cln_nq_number": None,
        "full_name": None,
        "first_name": None,
        "last_name": None,
        "qualification_level": None,
        "qualification": None,
        "issued_date": None,
        "valid_until": None
    }

if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"success": False, "error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
dprint("Processing image URL", image_url)

try:
    image_path = download_image(image_url)
    dprint("Image downloaded to", image_path)
    ocr_results = extract_ocr_lines(image_path)
    dprint("OCR results ready", ocr_results)

    # Create the response object
    response = {
        "success": True,
        "ocr_results": ocr_results
    }

    # Restore stdout and print only the JSON response
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps(response))
    sys.stdout.flush()

except Exception as e:
    dprint("Exception", str(e))
    # Restore stdout for error JSON
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": False, "error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    # Clean up
    try:
        clean_cache()
    except:
        pass
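The TESDA variant of format_date first re-inserts the space after the comma, so the "Month DD,YYYY" shapes OCR tends to produce all normalize to ISO dates. A few illustrative inputs (a sketch only; assumes format_date from this file is in scope):

# Expected normalizations from the TESDA format_date variant
assert format_date("july 22,2022") == "2022-07-22"   # comma with no space
assert format_date("July 22, 2022") == "2022-07-22"  # canonical month-name form
assert format_date("07/22/2022") == "2022-07-22"     # mm/dd/yyyy
assert format_date("2022/07/22") == "2022-07-22"     # yyyy/mm/dd

Unparseable strings fall through and are returned unchanged, so callers always get either an ISO date or the raw OCR text, never an exception.
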
extract_umid.py
ADDED
@@ -0,0 +1,317 @@
import sys, json, os, glob, re, requests
from PIL import Image
from io import BytesIO
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr

# Immediately redirect all output to stderr except for our final JSON
original_stdout = sys.stdout
sys.stdout = sys.stderr

# Suppress all PaddleOCR output
os.environ['PADDLEOCR_LOG_LEVEL'] = 'ERROR'
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
os.environ['DISPLAY'] = ':99'

# Import PaddleOCR after setting environment variables
from paddleocr import PaddleOCR

def dprint(msg, obj=None):
    try:
        print(f"DEBUG: {msg}" + (f": {obj}" if obj is not None else ""), file=sys.stderr)
    except Exception:
        pass

def clean_cache():
    files = ['temp_image.jpg', 'temp_image_ocr_res_img.jpg', 'temp_image_preprocessed_img.jpg', 'temp_image_res.json']
    for f in files:
        if os.path.exists(f):
            os.remove(f)
            dprint("Removed cache file", f)
    if os.path.exists("output"):
        import shutil
        shutil.rmtree("output")
        dprint("Removed output directory")

def download_image(url, output_path='temp_image.jpg'):
    dprint("Starting download", url)
    clean_cache()
    r = requests.get(url)
    dprint("HTTP status", r.status_code)
    r.raise_for_status()
    img = Image.open(BytesIO(r.content))
    if img.mode == 'RGBA':
        bg = Image.new('RGB', img.size, (255, 255, 255))
        bg.paste(img, mask=img.split()[-1])
        img = bg
    elif img.mode != 'RGB':
        img = img.convert('RGB')
    img.save(output_path, 'JPEG', quality=95)
    dprint("Saved image", output_path)
    return output_path

def format_date(s):
    if not s:
        return None
    raw = str(s).strip()
    t = raw.replace(' ', '').replace('\\','/').replace('.','/')
    # 1960/01/28 or 1960-01-28
    if re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', t):
        return t.replace('/', '-')
    # 01/28/1960
    if re.match(r'^\d{2}/\d{2}/\d{4}$', raw):
        m, d, y = raw.split('/')
        return f"{y}-{int(m):02d}-{int(d):02d}"
    # Month name variants
    try:
        return datetime.strptime(raw, "%B %d, %Y").strftime("%Y-%m-%d")
    except Exception:
        pass
    try:
        return datetime.strptime(raw, "%b %d, %Y").strftime("%Y-%m-%d")
    except Exception:
        pass
    return raw

def cap_words(s):
    return None if not s else ' '.join(w.capitalize() for w in s.split())

def take_within(lines, i, k=5):
    out = []
    for j in range(1, k+1):
        if i+j < len(lines):
            t = str(lines[i+j]).strip()
            if t:
                out.append(t)
    return out

def is_crn_text(t):
    # UMID CRN like 0028-1215160-9 or 002812151609
    z = str(t).strip()
    return bool(re.match(r'^\d{4}-\d{7}-\d$', z)) or bool(re.match(r'^\d{12,13}$', z))

def normalize_name(last, given):
    last = (last or '').strip()
    tokens = [t for t in (given or '').strip().split(' ') if t]
    kept = tokens[:2]  # First [+Second], ignore middles beyond that
    name = ' '.join(kept + [last]).strip()
    return cap_words(name) if name else None

def glue_address(lines, start_idx):
    parts = []
    stop_labels = ['crn', 'surname', 'given', 'middle', 'sex', 'date', 'birth']
    for k in range(1, 6):  # take up to 5 lines after ADDRESS
        idx = start_idx + k
        if idx >= len(lines):
            break
        t = str(lines[idx]).strip()
        if not t:
            continue
        low = t.lower()
        if any(lbl in low for lbl in stop_labels):
            break
        parts.append(t)
    # collapse extra spaces and commas
    address = ', '.join(parts)
    address = re.sub(r'\s{2,}', ' ', address)
    address = address.replace(' ,', ',')
    return address or None

def extract_umid_info(lines):
    dprint("Lines to extract", lines)

    crn = None
    full_name = None
    birth_date = None
    sex = None
    address = None

    last_name_txt = None
    given_name_txt = None

    L = [str(x or '').strip() for x in lines]
    i = 0
    while i < len(L):
        line = L[i]
        low = line.lower()
        dprint("Line", {"i": i, "text": line})

        # CRN
        crn_candidate = extract_crn_from_text(line)
        if crn is None and crn_candidate:
            crn = crn_candidate
            dprint("Found CRN", crn)
        elif crn is None and i+1 < len(L):
            crn_candidate = extract_crn_from_text(L[i+1])
            if crn_candidate:
                crn = crn_candidate
                dprint("Found CRN (next)", crn)

        # Surname / Given Name / Middle Name (ignore middle)
        if 'surname' in low:
            ahead = take_within(L, i, 3)
            for t in ahead:
                tl = t.lower()
                if not any(k in tl for k in ['given', 'middle', 'sex', 'date', 'birth', 'address', 'crn']):
                    last_name_txt = t
                    dprint("Captured last_name", last_name_txt)
                    break

        if 'given name' in low or 'given' in low:
            if i+1 < len(L):
                # sometimes value is same line (rare), often next line
                val = L[i+1] if L[i+1] else line.replace('given name', '').strip()
                given_name_txt = val if val else None
                dprint("Captured given_name", given_name_txt)

        # Sex and Date of Birth (handles same-line case like "SEXM DATEOFBIRTH 196O/O1/28")
        if 'sex' in low:
            # sex inline
            if re.search(r'\bF(EMALE)?\b', line, flags=re.I): sex = 'F'
            if re.search(r'\bM(ALE)?\b', line, flags=re.I): sex = 'M'
            # lookahead fallback
            if sex is None:
                for t in take_within(L, i, 2):
                    tt = t.strip().upper()
                    if tt in ('M','F','MALE','FEMALE'):
                        sex = 'M' if tt.startswith('M') else 'F'
                        break

        # Date of Birth inline or in next tokens
        if 'date' in low and 'birth' in low:
            # inline date
            m = re.search(r'(\d[0-9OIl]{3}[/\-][0-1OIl]\d[/\-]\d{2,4})', line)
            if m:
                birth_date = format_date(normalize_digits(m.group(1)))
                dprint("Found birth_date (inline)", birth_date)
            if birth_date is None:
                for t in take_within(L, i, 3):
                    # fix digits then try
                    cand = normalize_digits(t)
                    if re.search(r'\d', cand) and format_date(cand):
                        birth_date = format_date(cand)
                        dprint("Found birth_date (lookahead)", birth_date)
                        break

        # Also catch standalone yyyy/mm/dd line
        if birth_date is None and re.match(r'^\d{4}[-/]\d{2}[-/]\d{2}$', normalize_digits(line)):
            birth_date = format_date(normalize_digits(line))
            dprint("Found standalone birth_date", birth_date)

        # Address block
        if 'address' in low and address is None:
            address = glue_address(L, i)
            dprint("Found address", address)

        i += 1

    # Compose final name
    if full_name is None:
        full_name = normalize_name(last_name_txt, given_name_txt)
        dprint("Composed full_name", {"last": last_name_txt, "given": given_name_txt, "full": full_name})

    result = {
        "id_type": "umid",
        "crn": crn,
        "id_number": crn,  # frontend expects this
        "full_name": full_name,
        "birth_date": birth_date,
        "sex": sex,
        "address": address
    }
    dprint("Final result", result)
    return result

def extract_ocr_lines(image_path):
    os.makedirs("output", exist_ok=True)
    dprint("Initializing PaddleOCR")

    with redirect_stdout(sys.stderr), redirect_stderr(sys.stderr):
        ocr = PaddleOCR(
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            lang='en',
            show_log=False
        )
        dprint("OCR initialized")
        dprint("Running OCR predict", image_path)
        results = ocr.ocr(image_path, cls=False)
        dprint("OCR predict done, results_count", len(results))

    # Process OCR results directly
    all_text = []
    try:
        lines = results[0] if results and isinstance(results[0], list) else results
        for item in lines:
            if isinstance(item, (list, tuple)) and len(item) >= 2:
                meta = item[1]
                if isinstance(meta, (list, tuple)) and len(meta) >= 1:
                    all_text.append(str(meta[0]))
    except Exception as e:
        dprint("Error processing OCR results", str(e))

    dprint("All direct texts", all_text)
    return extract_umid_info(all_text) if all_text else {
        "id_type": "umid",
        "crn": None,
        "id_number": None,
        "full_name": None,
        "birth_date": None
    }

def normalize_digits(s):
    # Fix common OCR digit confusions: O→0, o→0, I/l→1, S→5, B→8
    return (
        str(s)
        .replace('O','0').replace('o','0')
        .replace('I','1').replace('l','1')
        .replace('S','5')
        .replace('B','8')
    )

def extract_crn_from_text(t):
    # Accept "CRN-0028-1215160-9" or plain digits/hyphens
    m = re.search(r'crn[^0-9]*([0-9OIl\-]{10,})', t, flags=re.IGNORECASE)
    if m:
        val = normalize_digits(m.group(1))
        # Keep hyphens; also accept compact digits
        if re.match(r'^\d{4}-\d{7}-\d$', val) or re.match(r'^\d{12,13}$', val):
            return val
    # Or whole token is the number
    val = normalize_digits(t.strip())
    if re.match(r'^\d{4}-\d{7}-\d$', val) or re.match(r'^\d{12,13}$', val):
        return val
    return None

if len(sys.argv) < 2:
    sys.stdout = original_stdout
    print(json.dumps({"error": "No image URL provided"}))
    sys.exit(1)

image_url = sys.argv[1]
dprint("Processing image URL", image_url)
try:
    image_path = download_image(image_url)
    dprint("Image downloaded to", image_path)
    ocr_results = extract_ocr_lines(image_path)
    dprint("OCR results ready")

    # Restore stdout and print only the JSON response
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"success": True, "ocr_results": ocr_results}))
    sys.stdout.flush()

except Exception as e:
    dprint("Exception", str(e))
    # Restore stdout for error JSON
    sys.stdout = original_stdout
    sys.stdout.write(json.dumps({"error": str(e)}))
    sys.stdout.flush()
    sys.exit(1)
finally:
    # Clean up
    try:
        clean_cache()
    except:
        pass
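The pairing of normalize_digits and extract_crn_from_text is what lets the UMID parser survive the classic O/0 and I/1 OCR swaps in the CRN. A quick sketch (sample CRNs are fabricated; assumes both helpers are in scope):

# OCR read the CRN with letter O in place of zero
assert normalize_digits("OO28-1215160-9") == "0028-1215160-9"
assert extract_crn_from_text("CRN-0028-1215160-9") == "0028-1215160-9"
assert extract_crn_from_text("002812151609") == "002812151609"  # compact 12-digit form
assert extract_crn_from_text("SURNAME") is None                 # non-CRN text is rejected
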
requirements.txt
ADDED
@@ -0,0 +1,14 @@
paddleocr>=2.7.0
paddlepaddle>=2.5.0
opencv-python-headless>=4.8.0
pillow>=10.0.0
numpy>=1.24.0
requests>=2.31.0
flask>=2.3.0
gunicorn>=21.2.0
shapely>=2.0.0
pyclipper>=1.3.0
imgaug>=0.4.0
lmdb>=1.4.0
tqdm>=4.65.0
test_api.py
ADDED
@@ -0,0 +1,145 @@
#!/usr/bin/env python3
"""
Test script for HandyHome OCR API
Tests all endpoints with sample document URLs
"""

import requests
import json
import sys

# Configuration
BASE_URL = "http://localhost:7860"  # Change to your Hugging Face Space URL for remote testing

# Sample test URLs (replace with your actual test image URLs)
TEST_URLS = {
    'national_id': 'https://example.com/national_id.jpg',
    'drivers_license': 'https://example.com/drivers_license.jpg',
    'prc': 'https://example.com/prc.jpg',
    'umid': 'https://example.com/umid.jpg',
    'sss': 'https://example.com/sss.jpg',
    'passport': 'https://example.com/passport.jpg',
    'postal': 'https://example.com/postal.jpg',
    'phic': 'https://example.com/phic.jpg',
    'nbi': 'https://example.com/nbi.jpg',
    'police': 'https://example.com/police.jpg',
    'tesda': 'https://example.com/tesda.jpg',
}

def test_health():
    """Test health endpoint"""
    print("\n" + "="*60)
    print("Testing Health Endpoint")
    print("="*60)

    try:
        response = requests.get(f"{BASE_URL}/health", timeout=10)
        print(f"Status Code: {response.status_code}")
        print(f"Response: {json.dumps(response.json(), indent=2)}")
        return response.status_code == 200
    except Exception as e:
        print(f"Error: {str(e)}")
        return False

def test_index():
    """Test index/documentation endpoint"""
    print("\n" + "="*60)
    print("Testing Index/Documentation Endpoint")
    print("="*60)

    try:
        response = requests.get(f"{BASE_URL}/", timeout=10)
        print(f"Status Code: {response.status_code}")
        data = response.json()
        print(f"Service: {data.get('service')}")
        print(f"Version: {data.get('version')}")
        print(f"Available Endpoints: {len(data.get('endpoints', {}))}")
        return response.status_code == 200
    except Exception as e:
        print(f"Error: {str(e)}")
        return False

def test_extraction_endpoint(endpoint, document_type, document_url):
    """Test an OCR extraction endpoint"""
    print("\n" + "="*60)
    print(f"Testing: {document_type}")
    print("="*60)
    print(f"Endpoint: {endpoint}")
    print(f"Document URL: {document_url}")

    try:
        response = requests.post(
            f"{BASE_URL}{endpoint}",
            json={'document_url': document_url},
            timeout=300
        )
        print(f"Status Code: {response.status_code}")

        try:
            data = response.json()
            print(f"Response: {json.dumps(data, indent=2)}")
            return response.status_code == 200 and data.get('success', False)
        except:
            print(f"Response (non-JSON): {response.text[:200]}")
            return False

    except requests.exceptions.Timeout:
        print("Error: Request timed out (>5 minutes)")
        return False
    except Exception as e:
        print(f"Error: {str(e)}")
        return False

def main():
    """Run all tests"""
    print("\n" + "="*60)
    print("HandyHome OCR API Test Suite")
    print("="*60)
    print(f"Base URL: {BASE_URL}")

    results = {}

    # Test utility endpoints
    results['health'] = test_health()
    results['index'] = test_index()

    # Test OCR endpoints
    endpoints_to_test = [
        ('/api/extract-national-id', 'National ID', TEST_URLS['national_id']),
        ('/api/extract-drivers-license', "Driver's License", TEST_URLS['drivers_license']),
        ('/api/extract-prc', 'PRC ID', TEST_URLS['prc']),
        ('/api/extract-umid', 'UMID', TEST_URLS['umid']),
        ('/api/extract-sss', 'SSS ID', TEST_URLS['sss']),
        ('/api/extract-passport', 'Passport', TEST_URLS['passport']),
        ('/api/extract-postal', 'Postal ID', TEST_URLS['postal']),
        ('/api/extract-phic', 'PhilHealth ID', TEST_URLS['phic']),
        ('/api/extract-nbi', 'NBI Clearance', TEST_URLS['nbi']),
        ('/api/extract-police-clearance', 'Police Clearance', TEST_URLS['police']),
        ('/api/extract-tesda', 'TESDA Certificate', TEST_URLS['tesda']),
    ]

    for endpoint, doc_type, url in endpoints_to_test:
        key = doc_type.lower().replace(' ', '_').replace("'", '')
        results[key] = test_extraction_endpoint(endpoint, doc_type, url)

    # Print summary
    print("\n" + "="*60)
    print("Test Summary")
    print("="*60)

    passed = sum(1 for v in results.values() if v)
    total = len(results)

    for test_name, passed_test in results.items():
        status = "✓ PASS" if passed_test else "✗ FAIL"
        print(f"{status} - {test_name}")

    print("\n" + "="*60)
    print(f"Results: {passed}/{total} tests passed")
    print("="*60)

    return 0 if passed == total else 1

if __name__ == '__main__':
    sys.exit(main())
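For remote testing only BASE_URL changes; a one-off call against a deployed Space looks like this (the Space URL and document URL below are placeholders, not real endpoints):

import requests

SPACE_URL = "https://your-username-your-space.hf.space"  # placeholder
resp = requests.post(
    f"{SPACE_URL}/api/extract-sss",
    json={"document_url": "https://example.com/sss.jpg"},
    timeout=300,
)
print(resp.json())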