markobinario commited on
Commit
7af3700
·
verified ·
1 Parent(s): 7250ce9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +337 -123
app.py CHANGED
@@ -1,128 +1,342 @@
1
- *** Begin Patch
2
- *** Update File: app.py
3
- @@
4
- - if 'pdf' in request.files and request.files['pdf'].filename:
5
- - pdf = request.files['pdf']
6
- - pdf_path = os.path.join(temp_dir, pdf.filename)
7
- - pdf.save(pdf_path)
8
- - print(f"📄 PDF saved to {pdf_path}")
9
- -
10
- - try:
11
- - print("🚀 Sending PDF to /handle_complex_doc...")
12
- - result = ocr_client.predict(
13
- - file_path=handle_file(pdf_path),
14
- - use_chart_recognition=False,
15
- - api_name="/handle_complex_doc"
16
- - )
17
- - print("✅ OCR completed for PDF")
18
- - print(f"OCR raw result: {result}")
19
- -
20
- - if isinstance(result, (list, tuple)) and len(result) >= 1:
21
- - extracted_text = str(result[0])
22
- - elif isinstance(result, str):
23
- - extracted_text = result
24
- - else:
25
- - extracted_text = f"Unexpected result format: {type(result)}"
26
- -
27
- - return jsonify({"extracted_text": extracted_text.strip()})
28
- - except Exception as e:
29
- - return jsonify({"error": f"Error processing PDF: {e}"}), 500
30
- + if 'pdf' in request.files and request.files['pdf'].filename:
31
- + pdf = request.files['pdf']
32
- + pdf_path = os.path.join(temp_dir, pdf.filename)
33
- + pdf.save(pdf_path)
34
- + print(f"📄 PDF saved to {pdf_path}")
35
- +
36
- + # Try local text extraction via PyMuPDF first
37
- + try:
38
- + doc = fitz.open(pdf_path)
39
- + collected_text = []
40
- + for page_index, page in enumerate(doc):
41
- + text = page.get_text("text") or ""
42
- + collected_text.append(f"--- Page {page_index + 1} ---\n{text.strip()}\n")
43
- + local_text = "\n".join(collected_text).strip()
44
- + if local_text:
45
- + print("✅ Extracted text locally via PyMuPDF")
46
- + return jsonify({"extracted_text": local_text})
47
- + except Exception as e:
48
- + print(f"⚠️ Local PyMuPDF extraction failed: {e}")
49
- +
50
- + # Fallback to remote endpoint for complex layout parsing
51
- + try:
52
- + print("🚀 Sending PDF to /handle_complex_doc...")
53
- + result = ocr_client.predict(
54
- + file_path=handle_file(pdf_path),
55
- + use_chart_recognition=False,
56
- + api_name="/handle_complex_doc"
57
- + )
58
- + print("✅ OCR completed for PDF")
59
- + print(f"OCR raw result: {result}")
60
- +
61
- + if isinstance(result, (list, tuple)) and len(result) >= 1:
62
- + extracted_text = str(result[0])
63
- + elif isinstance(result, str):
64
- + extracted_text = result
65
- + else:
66
- + extracted_text = f"Unexpected result format: {type(result)}"
67
- +
68
- + return jsonify({"extracted_text": extracted_text.strip()})
69
- + except Exception as e:
70
- + return jsonify({"error": f"Error processing PDF: {e}"}), 500
71
- *** End Patched_text = f"Unexpected result format: {type(result)}"
72
-
73
- return jsonify({"extracted_text": extracted_text.strip()})
74
- except Exception as e:
75
- return jsonify({"error": f"Error processing image: {e}"}), 500
76
-
77
- # Handle PDF uploads
78
- if 'pdf' in request.files and request.files['pdf'].filename:
79
- pdf = request.files['pdf']
80
- pdf_path = os.path.join(temp_dir, pdf.filename)
81
- pdf.save(pdf_path)
82
- print(f"📄 PDF saved to {pdf_path}")
83
-
84
- # Try local text extraction via PyMuPDF first
85
- try:
86
- doc = fitz.open(pdf_path)
87
- collected_text = []
88
- for page_index, page in enumerate(doc):
89
- text = page.get_text("text") or ""
90
- collected_text.append(f"--- Page {page_index + 1} ---\n{text.strip()}\n")
91
- local_text = "\n".join(collected_text).strip()
92
- if local_text:
93
- print(" Extracted text locally via PyMuPDF")
94
- return jsonify({"extracted_text": local_text})
95
- except Exception as e:
96
- print(f"⚠️ Local PyMuPDF extraction failed: {e}")
97
-
98
- # Fallback to remote endpoint for complex layout parsing
99
- try:
100
- print("🚀 Sending PDF to /handle_complex_doc...")
101
- result = ocr_client.predict(
102
- file_path=handle_file(pdf_path),
103
- use_chart_recognition=False,
104
- api_name="/handle_complex_doc"
105
- )
106
- print("✅ OCR completed for PDF")
107
- print(f"OCR raw result: {result}")
108
-
109
- if isinstance(result, (list, tuple)) and len(result) >= 1:
110
- extracted_text = str(result[0])
111
- elif isinstance(result, str):
112
- extracted_text = result
113
- else:
114
- extracted_text = f"Unexpected result format: {type(result)}"
115
-
116
- return jsonify({"extracted_text": extracted_text.strip()})
117
- except Exception as e:
118
- return jsonify({"error": f"Error processing PDF: {e}"}), 500
119
-
120
- return jsonify({"error": "No file uploaded. Please upload an image or a PDF."}), 400
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
  except Exception as e:
123
- print(f" Fatal error in /extract: {e}")
124
- return jsonify({"error": f"Fatal error: {str(e)}"}), 500
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
 
 
 
 
 
126
 
127
  if __name__ == "__main__":
128
- app.run(debug=True)
 
 
1
+ import base64
2
+ import io
3
+ import json
4
+ import os
5
+ from typing import Dict, List, Tuple, Any, Optional
6
+ import time
7
+ import requests
8
+ from PIL import Image
9
+ import fitz # PyMuPDF
10
+ import gradio as gr
11
# =========================
# Config
# =========================
# Remote OCR endpoint and bearer token are supplied through the environment.
DEFAULT_API_URL = os.environ.get("API_URL")
TOKEN = os.environ.get("TOKEN")

LOGO_IMAGE_PATH = './assets/logo.jpg'
GOOGLE_FONTS_URL = "<link href='https://fonts.googleapis.com/css2?family=Noto+Sans+SC:wght@400;700&display=swap' rel='stylesheet'>"

# Delimiters the Gradio Markdown component uses to detect LaTeX spans.
LATEX_DELIMS = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "$", "right": "$", "display": False},
    {"left": "\\(", "right": "\\)", "display": False},
    {"left": "\\[", "right": "\\]", "display": True},
]

# Auth header for every request; JSON_HEADERS additionally declares the body type.
AUTH_HEADER = {"Authorization": f"bearer {TOKEN}"}
JSON_HEADERS = {**AUTH_HEADER, "Content-Type": "application/json"}
26
+ # =========================
27
+ # Base64 and Example Loading Logic
28
+ # =========================
29
def image_to_base64_data_url(filepath: str) -> str:
    """Encode a local image file as a Base64 data URL; return "" on any failure."""
    mime_by_ext = {
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
        '.gif': 'image/gif',
    }
    try:
        suffix = os.path.splitext(filepath)[1].lower()
        # Unknown extensions fall back to JPEG, matching the upload defaults.
        mime_type = mime_by_ext.get(suffix, 'image/jpeg')
        with open(filepath, "rb") as image_file:
            encoded = base64.b64encode(image_file.read()).decode("utf-8")
        return f"data:{mime_type};base64,{encoded}"
    except Exception as e:
        print(f"Error encoding image to Base64: {e}")
        return ""
41
+
42
+
43
+ def _get_examples_from_dir(dir_path: str) -> List[List[str]]:
44
+ supported_exts = {".png", ".jpg", ".jpeg", ".bmp", ".webp"}
45
+ examples = []
46
+ if not os.path.exists(dir_path): return []
47
+ for filename in sorted(os.listdir(dir_path)):
48
+ if os.path.splitext(filename)[1].lower() in supported_exts:
49
+ examples.append([os.path.join(dir_path, filename)])
50
+ return examples
51
+
52
# Example galleries are filled from these folders at startup (empty if absent).
TARGETED_EXAMPLES_DIR = "examples/targeted"
COMPLEX_EXAMPLES_DIR = "examples/complex"
targeted_recognition_examples = _get_examples_from_dir(TARGETED_EXAMPLES_DIR)
complex_document_examples = _get_examples_from_dir(COMPLEX_EXAMPLES_DIR)
56
+
57
+ # =========================
58
+ # UI Helpers
59
+ # =========================
60
def render_uploaded_image_div(file_path: str) -> str:
    """Build the inline-preview HTML for an uploaded image (embedded as a data URL)."""
    data_url = image_to_base64_data_url(file_path)
    return f"""
<div class="uploaded-image">
<img src="{data_url}" alt="Uploaded image" style="width:100%;height:100%;object-fit:contain;"/>
</div>
"""
67
+
68
def update_preview_visibility(file_path: Optional[str]) -> Dict:
    """Show the inline preview for image uploads; hide it for PDFs or when no file is set."""
    if not file_path:
        return gr.update(value="", visible=False)
    if os.path.splitext(file_path)[1].lower() == ".pdf":
        # PDFs get no inline image preview.
        return gr.update(value="", visible=False)
    return gr.update(value=render_uploaded_image_div(file_path), visible=True)
78
+
79
def _on_gallery_select(example_paths: List[str], evt: gr.SelectData):
    """Return the example path for the clicked gallery tile, or None on any failure."""
    try:
        return example_paths[evt.index]
    except Exception:
        return None
85
+
86
+ # =========================
87
+ # API Call Logic
88
+ # =========================
89
+ def _file_to_b64_image_only(file_path: str) -> Tuple[str, int]:
90
+ if not file_path: raise ValueError("Please upload an image first.")
91
+ ext = os.path.splitext(file_path)[1].lower()
92
+ if ext not in {".png", ".jpg", ".jpeg", ".bmp", ".webp"}: raise ValueError("Only image files are supported.")
93
+ with open(file_path, "rb") as f:
94
+ return base64.b64encode(f.read()).decode("utf-8"), 1
95
+
96
def _call_api(api_url: str, file_path: str, use_layout_detection: bool,
              prompt_label: Optional[str], use_chart_recognition: bool = False) -> Dict[str, Any]:
    """POST the image to the OCR service and return the decoded JSON payload.

    Raises ValueError for a missing prompt label, and gr.Error for transport
    failures, invalid JSON, or a non-zero server errorCode.
    """
    b64, file_type = _file_to_b64_image_only(file_path)
    payload = {
        "file": b64,
        "useLayoutDetection": bool(use_layout_detection),
        "fileType": file_type,
        "layoutMergeBboxesMode": "union",
    }
    if not use_layout_detection:
        # Element-level mode requires an explicit prompt label (ocr/formula/table/chart).
        if not prompt_label:
            raise ValueError("Please select a recognition type.")
        payload["promptLabel"] = prompt_label.strip().lower()
    if use_layout_detection and use_chart_recognition:
        payload["useChartRecognition"] = True

    try:
        print(f"Sending API request to {api_url}...")
        start_time = time.time()
        resp = requests.post(api_url, json=payload, headers=JSON_HEADERS, timeout=600)
        duration = time.time() - start_time
        print(f"Received API response in {duration:.2f} seconds.")

        resp.raise_for_status()
        data = resp.json()
    except requests.exceptions.RequestException as e:
        # Fix: add the missing space after the colon in the user-facing message.
        raise gr.Error(f"API request failed: {e}")
    except json.JSONDecodeError:
        raise gr.Error(f"Invalid JSON response from server:\n{getattr(resp, 'text', '')}")

    if data.get("errorCode", -1) != 0:
        # Fix: surface the server's error details instead of a bare prefix.
        raise gr.Error(
            f"API returned an error: code={data.get('errorCode')}, msg={data.get('errorMsg', '')}"
        )
    return data
130
+
131
+
132
+ # =========================
133
+ # API Response Processing
134
+ # =========================
135
+
136
+ # [Change note]: This function is no longer needed, because we no longer download the URL into a PIL Image object.
137
+ # def url_to_pil_image(url: str) -> Optional[Image.Image]:
138
+ # """Downloads an image from a URL and returns it as a PIL Image object for the Gradio Image component."""
139
+ # if not url or not url.startswith(('http://', 'https://')):
140
+ # print(f"Warning: Invalid URL provided for visualization image: {url}")
141
+ # return None
142
+ # try:
143
+ # start_time = time.time()
144
+ # response = requests.get(url, timeout=600)
145
+ # end_time = time.time()
146
+ # print(f"Fetched visualization image from {url} in {end_time - start_time:.2f} seconds.")
147
+ #
148
+ # response.raise_for_status()
149
+ # image_bytes = response.content
150
+ # pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
151
+ # return pil_image
152
+ # except requests.exceptions.RequestException as e:
153
+ # print(f"Error fetching visualization image from URL {url}: {e}")
154
+ # return None
155
+ # except Exception as e:
156
+ # print(f"Error processing visualization image from URL {url}: {e}")
157
+ # return None
158
+
159
+ def _process_api_response_page(result: Dict[str, Any]) -> Tuple[str, str, str]:
160
+ """
161
+ Processes the API response.
162
+ 1. Replaces markdown image placeholders with their direct URLs.
163
+ 2. Constructs an HTML <img> tag string for the visualization image URL.
164
+ """
165
+ layout_results = (result or {}).get("layoutParsingResults", [])
166
+ if not layout_results:
167
+ return "No content was recognized.", "<p>No visualization available.</p>", ""
168
 
169
+ page0 = layout_results[0] or {}
170
+
171
+ # Step 1: Process Markdown content (unchanged from previous optimization)
172
+ md_data = page0.get("markdown") or {}
173
+ md_text = md_data.get("text", "") or ""
174
+ md_images_map = md_data.get("images", {})
175
+ if md_images_map:
176
+ for placeholder_path, image_url in md_images_map.items():
177
+ md_text = md_text.replace(f'src="{placeholder_path}"', f'src="{image_url}"') \
178
+ .replace(f']({placeholder_path})', f']({image_url})')
179
+
180
+ # 【核心改动点】 Step 2: Process Visualization images by creating an HTML string
181
+ output_html = "<p style='text-align:center; color:#888;'>No visualization image available.</p>"
182
+ out_imgs = page0.get("outputImages") or {}
183
+
184
+ # Get all image URLs and sort them
185
+ sorted_urls = [img_url for _, img_url in sorted(out_imgs.items()) if img_url]
186
+
187
+ # Logic to select the final visualization image URL
188
+ output_image_url: Optional[str] = None
189
+ if len(sorted_urls) >= 2:
190
+ output_image_url = sorted_urls[1]
191
+ elif sorted_urls:
192
+ output_image_url = sorted_urls[0]
193
+
194
+ # If a URL was found, create the <img> tag
195
+ if output_image_url:
196
+ print(f"Found visualization image URL: {output_image_url}")
197
+ # The CSS will style this `img` tag because of the `#vis_image_doc img` selector
198
+ output_html = f'<img src="{output_image_url}" alt="Detection Visualization">'
199
+ else:
200
+ print("Warning: No visualization image URL found in the API response.")
201
+
202
+ return md_text or "(Empty result)", output_html, md_text
203
+
204
+ # =========================
205
+ # Handlers
206
+ # =========================
207
def _extract_pdf_text_with_pymupdf(pdf_path: str) -> Tuple[str, str, str]:
    """Extract plain text from a PDF using PyMuPDF and return as markdown and raw."""
    try:
        document = fitz.open(pdf_path)
        sections = []
        for page_number, page in enumerate(document, start=1):
            body = page.get_text("text") or ""
            sections.append(f"### Page {page_number}\n\n{body.strip()}\n")
        md_text = "\n".join(sections).strip() or "No content was recognized."
        vis_html = "<p>No visualization available for PDF text extraction.</p>"
        return md_text, vis_html, md_text
    except Exception as e:
        raise gr.Error(f"Failed to read PDF: {e}")
220
+
221
def handle_complex_doc(file_path: str, use_chart_recognition: bool) -> Tuple[str, str, str]:
    """Parse a full document: PDFs locally via PyMuPDF, images via the remote API."""
    if not file_path:
        raise gr.Error("Please upload an image or PDF first.")
    if os.path.splitext(file_path)[1].lower() == ".pdf":
        # PDFs never hit the remote endpoint; extract their text locally.
        return _extract_pdf_text_with_pymupdf(file_path)
    response = _call_api(DEFAULT_API_URL, file_path, use_layout_detection=True,
                         prompt_label=None, use_chart_recognition=use_chart_recognition)
    return _process_api_response_page(response.get("result", {}))
231
+
232
def handle_targeted_recognition(file_path: str, prompt_choice: str) -> Tuple[str, str]:
    """Run element-level recognition (text/formula/table/chart) on a single image."""
    if not file_path:
        raise gr.Error("Please upload an image first.")
    label_map = {
        "Text Recognition": "ocr",
        "Formula Recognition": "formula",
        "Table Recognition": "table",
        "Chart Recognition": "chart",
    }
    response = _call_api(DEFAULT_API_URL, file_path, use_layout_detection=False,
                         prompt_label=label_map.get(prompt_choice, "ocr"))
    md_preview, _, md_raw = _process_api_response_page(response.get("result", {}))
    return md_preview, md_raw
240
+
241
# =========================
# CSS & UI
# =========================
# Styles injected into the Gradio app: fonts, header/link layout, preview panes.
custom_css = """
/* 全局字体 */
body, .gradio-container {
font-family: "Noto Sans SC", "Microsoft YaHei", "PingFang SC", sans-serif;
}
/* ... (rest of the CSS is unchanged) ... */
.app-header { text-align: center; max-width: 900px; margin: 0 auto 8px !important; }
.gradio-container { padding: 4px 0 !important; }
.gradio-container [data-testid="tabs"], .gradio-container .tabs { margin-top: 0 !important; }
.gradio-container [data-testid="tabitem"], .gradio-container .tabitem { padding-top: 4px !important; }
.quick-links { text-align: center; padding: 8px 0; border: 1px solid #e5e7eb; border-radius: 8px; margin: 8px auto; max-width: 900px; }
.quick-links a { margin: 0 12px; font-size: 14px; font-weight: 600; color: #3b82f6; text-decoration: none; }
.quick-links a:hover { text-decoration: underline; }
.prompt-grid { display: flex; flex-wrap: wrap; gap: 8px; margin-top: 6px; }
.prompt-grid button { height: 40px !important; padding: 0 12px !important; border-radius: 8px !important; font-weight: 600 !important; font-size: 13px !important; letter-spacing: 0.2px; }
#image_preview_vl, #image_preview_doc { height: 400px !important; overflow: auto; }
#image_preview_vl img, #image_preview_doc img, #vis_image_doc img { width: 100% !important; height: auto !important; object-fit: contain !important; display: block; }
#md_preview_vl, #md_preview_doc { max-height: 540px; min-height: 180px; overflow: auto; scrollbar-gutter: stable both-edges; }
#md_preview_vl .prose, #md_preview_doc .prose { line-height: 1.7 !important; }
#md_preview_vl .prose img, #md_preview_doc .prose img { display: block; margin: 0 auto; max-width: 100%; height: auto; }
.notice { margin: 8px auto 0; max-width: 900px; padding: 10px 12px; border: 1px solid #e5e7eb; border-radius: 8px; background: #f8fafc; font-size: 14px; line-height: 1.6; }
.notice strong { font-weight: 700; }
.notice a { color: #3b82f6; text-decoration: none; }
.notice a:hover { text-decoration: underline; }
"""
269
+
270
# Build the Gradio UI: one tab for full-document parsing, one for element-level
# recognition. All heavy work happens in the handler functions defined above.
with gr.Blocks(head=GOOGLE_FONTS_URL, css=custom_css, theme=gr.themes.Soft()) as demo:
    logo_data_url = image_to_base64_data_url(LOGO_IMAGE_PATH) if os.path.exists(LOGO_IMAGE_PATH) else ""
    gr.HTML(f"""<div class="app-header"><img src="{logo_data_url}" alt="App Logo" style="max-height:10%; width: auto; margin: 10px auto; display: block;"></div>""")
    gr.HTML("""<div class="notice"><strong>Heads up:</strong> The Hugging Face demo can be slow at times. For a faster experience, please try <a href="https://aistudio.baidu.com/application/detail/98365" target="_blank" rel="noopener noreferrer">Baidu AI Studio</a> or <a href="https://modelscope.cn/studios/PaddlePaddle/PaddleOCR-VL_Online_Demo/summary" target="_blank" rel="noopener noreferrer">ModelScope</a>.</div>""")
    gr.HTML("""<div class="quick-links"><a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">GitHub</a> | <a href="https://ernie.baidu.com/blog/publication/PaddleOCR-VL_Technical_Report.pdf" target="_blank">Technical Report</a> | <a href="https://huggingface.co/PaddlePaddle/PaddleOCR-VL" target="_blank">Model</a></div>""")

    with gr.Tabs():
        with gr.Tab("Document Parsing"):
            with gr.Row():
                with gr.Column(scale=5):
                    file_doc = gr.File(label="Upload Image or PDF", file_count="single", type="filepath", file_types=["image", ".pdf"])
                    preview_doc_html = gr.HTML(value="", elem_id="image_preview_doc", visible=False)
                    gr.Markdown("_( Use this mode for recognizing full-page documents with structured layouts, such as reports, papers, or magazines.)_")
                    gr.Markdown("💡 *To recognize a single, pre-cropped element (e.g., a table or formula), switch to the 'Element-level Recognition' tab for better results.*")
                    with gr.Row(variant="panel"):
                        chart_parsing_switch = gr.Checkbox(label="Enable chart parsing", value=False, scale=1)
                        btn_parse = gr.Button("Parse Document", variant="primary", scale=2)
                    if complex_document_examples:
                        complex_paths = [e[0] for e in complex_document_examples]
                        complex_state = gr.State(complex_paths)
                        gr.Markdown("**Document Examples (Click an image to load)**")
                        gallery_complex = gr.Gallery(value=complex_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
                        gallery_complex.select(fn=_on_gallery_select, inputs=[complex_state], outputs=[file_doc])

                with gr.Column(scale=7):
                    with gr.Tabs():
                        with gr.Tab("Markdown Preview"):
                            md_preview_doc = gr.Markdown("Please upload an image and click 'Parse Document'.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_doc")
                        with gr.Tab("Visualization"):
                            # Visualization is rendered as raw HTML (replaces gr.Image) so the
                            # remote image URL is displayed directly without downloading it.
                            vis_image_doc = gr.HTML(label="Detection Visualization", elem_id="vis_image_doc")
                        with gr.Tab("Markdown Source"):
                            md_raw_doc = gr.Code(label="Markdown Source Code", language="markdown")

            file_doc.change(fn=update_preview_visibility, inputs=[file_doc], outputs=[preview_doc_html])
            btn_parse.click(fn=handle_complex_doc, inputs=[file_doc, chart_parsing_switch], outputs=[md_preview_doc, vis_image_doc, md_raw_doc])

        with gr.Tab("Element-level Recognition"):
            with gr.Row():
                with gr.Column(scale=5):
                    file_vl = gr.File(label="Upload Image", file_count="single", type="filepath", file_types=["image"])
                    preview_vl_html = gr.HTML(value="", elem_id="image_preview_vl", visible=False)
                    gr.Markdown("_(Best for images with a **simple, single-column layout** (e.g., pure text), or for a **pre-cropped single element** like a table, formula, or chart.)_")
                    gr.Markdown("Choose a recognition type:")
                    with gr.Row(elem_classes=["prompt-grid"]):
                        btn_ocr = gr.Button("Text Recognition", variant="secondary")
                        # Bug fix: `variant` is keyword-only on gr.Button; the original
                        # passed "secondary" positionally, which raises TypeError at startup.
                        btn_formula = gr.Button("Formula Recognition", variant="secondary")
                    with gr.Row(elem_classes=["prompt-grid"]):
                        btn_table = gr.Button("Table Recognition", variant="secondary")
                        btn_chart = gr.Button("Chart Recognition", variant="secondary")
                    if targeted_recognition_examples:
                        targeted_paths = [e[0] for e in targeted_recognition_examples]
                        targeted_state = gr.State(targeted_paths)
                        gr.Markdown("**Element-level Recognition Examples (Click an image to load)**")
                        gallery_targeted = gr.Gallery(value=targeted_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
                        gallery_targeted.select(fn=_on_gallery_select, inputs=[targeted_state], outputs=[file_vl])

                with gr.Column(scale=7):
                    with gr.Tabs():
                        with gr.Tab("Recognition Result"):
                            md_preview_vl = gr.Markdown("Please upload an image and click a recognition type.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_vl")
                        with gr.Tab("Raw Output"):
                            md_raw_vl = gr.Code(label="Raw Output", language="markdown")

            file_vl.change(fn=update_preview_visibility, inputs=[file_vl], outputs=[preview_vl_html])
            btn_ocr.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Text Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_formula.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Formula Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_table.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Table Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_chart.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Chart Recognition")], outputs=[md_preview_vl, md_raw_vl])
339
 
340
if __name__ == "__main__":
    # Bind to all interfaces; the hosting platform supplies PORT (7860 locally).
    port = int(os.getenv("PORT", "7860"))
    demo.queue(max_size=6).launch(server_name="0.0.0.0", server_port=port, share=False)