root committed on
Commit bff3709 · 1 Parent(s): d148365

Move images to Git LFS (assets/, examples/)

.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
+*.jpeg filter=lfs diff=lfs merge=lfs -text
+*.gif filter=lfs diff=lfs merge=lfs -text
+*.bmp filter=lfs diff=lfs merge=lfs -text
+*.webp filter=lfs diff=lfs merge=lfs -text
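
Note: the six added patterns route the repository's raster image formats through Git LFS, which is what moves the files under assets/ and examples/ below. As a rough, illustrative sanity check (not part of this commit), a short script like the following could list which working-tree files those patterns would capture; the pattern list is copied from the hunk above, while the helper name and the basename-matching approximation of .gitattributes semantics are assumptions.

# Illustrative only: list files that the newly added .gitattributes patterns
# would route through Git LFS. Patterns are copied from the diff above.
import fnmatch
import os

LFS_PATTERNS = ["*.png", "*.jpg", "*.jpeg", "*.gif", "*.bmp", "*.webp"]

def lfs_tracked_files(root: str = ".") -> list:
    """Return paths under `root` whose basename matches an LFS-tracked pattern."""
    matches = []
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if any(fnmatch.fnmatch(name.lower(), pat) for pat in LFS_PATTERNS):
                matches.append(os.path.join(dirpath, name))
    return sorted(matches)

if __name__ == "__main__":
    for path in lfs_tracked_files("."):
        print(path)

Run from the repository root, this should list assets/logo.jpg plus the example images enumerated further down in this commit.
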
app.py ADDED
@@ -0,0 +1,337 @@
+import base64
+import io
+import json
+import os
+from typing import Dict, List, Tuple, Any, Optional
+
+import requests
+from PIL import Image
+import gradio as gr
+# =========================
+# Config
+# =========================
+DEFAULT_API_URL = os.environ.get("API_URL")
+LOGO_IMAGE_PATH = './assets/logo.jpg'
+GOOGLE_FONTS_URL = "<link href='https://fonts.googleapis.com/css2?family=Noto+Sans+SC:wght@400;700&display=swap' rel='stylesheet'>"
+LATEX_DELIMS = [
+    {"left": "$$", "right": "$$", "display": True},
+    {"left": "$", "right": "$", "display": False},
+    {"left": "\\(", "right": "\\)", "display": False},
+    {"left": "\\[", "right": "\\]", "display": True},
+]
+
+# =========================
+# Base64 and Example Loading Logic (From New Script)
+# =========================
+def image_to_base64_data_url(filepath: str) -> str:
+    """Reads a local image file and encodes it into a Base64 Data URL."""
+    try:
+        ext = os.path.splitext(filepath)[1].lower()
+        mime_types = {'.jpg': 'image/jpeg', '.jpeg': 'image/jpeg', '.png': 'image/png', '.gif': 'image/gif'}
+        mime_type = mime_types.get(ext, 'image/jpeg')
+        with open(filepath, "rb") as image_file:
+            encoded_string = base64.b64encode(image_file.read()).decode("utf-8")
+        return f"data:{mime_type};base64,{encoded_string}"
+    except Exception as e:
+        print(f"Error encoding image to Base64: {e}")
+        return ""
+
+def _get_examples_from_dir(dir_path: str) -> List[List[str]]:
+    supported_exts = {".png", ".jpg", ".jpeg", ".bmp", ".webp"}
+    examples = []
+    if not os.path.exists(dir_path): return []
+    for filename in sorted(os.listdir(dir_path)):
+        if os.path.splitext(filename)[1].lower() in supported_exts:
+            examples.append([os.path.join(dir_path, filename)])
+    return examples
+
+TARGETED_EXAMPLES_DIR = "examples/targeted"
+COMPLEX_EXAMPLES_DIR = "examples/complex"
+targeted_recognition_examples = _get_examples_from_dir(TARGETED_EXAMPLES_DIR)
+complex_document_examples = _get_examples_from_dir(COMPLEX_EXAMPLES_DIR)
+
+# =========================
+# UI Helpers (From New Script)
+# =========================
+def render_uploaded_image_div(file_path: str) -> str:
+    data_url = image_to_base64_data_url(file_path)
+    return f"""
+    <div class="uploaded-image">
+        <img src="{data_url}" alt="Uploaded image" style="width:100%;height:100%;object-fit:contain;"/>
+    </div>
+    """
+
+def update_preview_visibility(file_path: Optional[str]) -> Dict:
+    if file_path:
+        html_content = render_uploaded_image_div(file_path)
+        return gr.update(value=html_content, visible=True)
+    else:
+        return gr.update(value="", visible=False)
+
+def _on_gallery_select(example_paths: List[str], evt: gr.SelectData):
+    try:
+        idx = evt.index
+        return example_paths[idx]
+    except Exception:
+        return None
+
+# =========================
+# API Call Logic (From New Script - More feature complete)
+# =========================
+def _file_to_b64_image_only(file_path: str) -> Tuple[str, int]:
+    if not file_path: raise ValueError("Please upload an image first.")
+    ext = os.path.splitext(file_path)[1].lower()
+    if ext not in {".png", ".jpg", ".jpeg", ".bmp", ".webp"}: raise ValueError("Only image files are supported.")
+    with open(file_path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8"), 1
+
+def _call_api(api_url: str, file_path: str, use_layout_detection: bool, prompt_label: Optional[str], use_chart_recognition: bool = False) -> Dict[str, Any]:
+    b64, file_type = _file_to_b64_image_only(file_path)
+    payload = {"file": b64, "useLayoutDetection": bool(use_layout_detection), "fileType": file_type, "layoutMergeBboxesMode": "union"}
+
+    if not use_layout_detection:
+        if not prompt_label: raise ValueError("Please select a recognition type.")
+        payload["promptLabel"] = prompt_label.strip().lower()
+
+    # This parameter is from the new script's logic
+    if use_layout_detection and use_chart_recognition:
+        payload["use_chart_recognition"] = True
+
+    try:
+        resp = requests.post(api_url, json=payload, timeout=120)
+        resp.raise_for_status()
+        data = resp.json()
+    except requests.exceptions.RequestException as e:
+        raise gr.Error(f"API request failed: {e}")
+    except json.JSONDecodeError:
+        raise gr.Error(f"Invalid JSON response from server:\n{getattr(resp, 'text', '')}")
+    if data.get("errorCode", -1) != 0:
+        raise gr.Error(f"API returned an error: errorCode={data.get('errorCode')} errorMsg={data.get('errorMsg')}")
+    return data
+
+# =========================
+# Core Logic for Handling Image URLs (From Old "Xinghe" Script)
+# =========================
+def url_to_base64_data_url(url: str) -> str:
+    """Downloads an image from a URL and formats it as a Base64 Data URL for Markdown."""
+    try:
+        response = requests.get(url, timeout=30)
+        response.raise_for_status()
+        mime_type = response.headers.get('Content-Type', 'image/jpeg')
+        if not mime_type.startswith('image/'):
+            print(f"Warning: URL did not return an image content type. Got: {mime_type}")
+            mime_type = 'image/jpeg'
+        image_bytes = response.content
+        encoded_string = base64.b64encode(image_bytes).decode('utf-8')
+        return f"data:{mime_type};base64,{encoded_string}"
+    except requests.exceptions.RequestException as e:
+        print(f"Error fetching markdown image from URL {url}: {e}")
+        return url  # Fallback to original URL on error
+    except Exception as e:
+        print(f"An unexpected error occurred while processing markdown URL {url}: {e}")
+        return url
+
+def replace_image_urls_with_data_urls(md_text: str, md_images_map: Dict[str, str]) -> str:
+    """Replaces image placeholder paths in Markdown with Base64 Data URLs fetched from external URLs."""
+    if not md_images_map:
+        return md_text
+    for placeholder_path, image_url in md_images_map.items():
+        print(f"Processing markdown image for '{placeholder_path}' from URL: {image_url}")
+        data_url = url_to_base64_data_url(image_url)
+        md_text = md_text.replace(f'src="{placeholder_path}"', f'src="{data_url}"') \
+                         .replace(f']({placeholder_path})', f']({data_url})')
+    return md_text
+
+def url_to_pil_image(url: str) -> Optional[Image.Image]:
+    """Downloads an image from a URL and returns it as a PIL Image object for the Gradio Image component."""
+    if not url or not url.startswith(('http://', 'https://')):
+        print(f"Warning: Invalid URL provided for visualization image: {url}")
+        return None
+    try:
+        response = requests.get(url, timeout=30)
+        response.raise_for_status()
+        image_bytes = response.content
+        pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+        return pil_image
+    except requests.exceptions.RequestException as e:
+        print(f"Error fetching visualization image from URL {url}: {e}")
+        return None
+    except Exception as e:
+        print(f"Error processing visualization image from URL {url}: {e}")
+        return None
+
+# =========================
+# API Response Processing (From Old "Xinghe" Script - Handles URLs)
+# =========================
+def _process_api_response_page(result: Dict[str, Any]) -> Tuple[str, Optional[Image.Image], str]:
+    """
+    Processes the API response which contains URLs for images.
+    1. Converts markdown image URLs to inline Base64 Data URLs.
+    2. Downloads the visualization image URL into a PIL Image object.
+    """
+    layout_results = (result or {}).get("layoutParsingResults", [])
+    if not layout_results:
+        return "No content was recognized.", None, ""
+
+    page0 = layout_results[0] or {}
+
+    # Step 1: Process Markdown content using URL-to-Base64 logic
+    md_data = page0.get("markdown") or {}
+    md_text = md_data.get("text", "") or ""
+    md_images_map = md_data.get("images", {})  # This map contains URLs: {"placeholder.jpg": "http://..."}
+    if md_images_map:
+        md_text = replace_image_urls_with_data_urls(md_text, md_images_map)
+
+    # Step 2: Process Visualization images by downloading from URLs
+    vis_images: List[Image.Image] = []
+    out_imgs = page0.get("outputImages") or {}  # This dict contains URLs: {"0": "http://...", "1": "http://..."}
+    for _, img_url in sorted(out_imgs.items()):
+        pil_image = url_to_pil_image(img_url)
+        if pil_image:
+            vis_images.append(pil_image)
+        else:
+            print(f"Warning: Failed to load visualization image from URL: {img_url}")
+
+    # Logic to select the final visualization image
+    output_image: Optional[Image.Image] = None
+    if len(vis_images) >= 2:
+        output_image = vis_images[1]
+    elif vis_images:
+        output_image = vis_images[0]
+
+    return md_text or "(Empty result)", output_image, md_text
+
+# =========================
+# Handlers (From New Script - More feature complete)
+# =========================
+def handle_complex_doc(file_path: str, use_chart_recognition: bool) -> Tuple[str, Optional[Image.Image], str]:
+    if not file_path: raise gr.Error("Please upload an image first.")
+    data = _call_api(DEFAULT_API_URL, file_path, use_layout_detection=True, prompt_label=None, use_chart_recognition=use_chart_recognition)
+    result = data.get("result", {})
+    return _process_api_response_page(result)
+
+def handle_targeted_recognition(file_path: str, prompt_choice: str) -> Tuple[str, str]:
+    if not file_path: raise gr.Error("Please upload an image first.")
+    mapping = {"Text Recognition": "ocr", "Formula Recognition": "formula", "Table Recognition": "table", "Chart Recognition": "chart"}
+    label = mapping.get(prompt_choice, "ocr")
+    data = _call_api(DEFAULT_API_URL, file_path, use_layout_detection=False, prompt_label=label)
+    result = data.get("result", {})
+    md_preview, _, md_raw = _process_api_response_page(result)
+    return md_preview, md_raw
+
+# =========================
+# CSS & UI (From New Script)
+# =========================
+custom_css = '''
+body, button, input, textarea, select, p, label { font-family: "Microsoft YaHei","微软雅黑","Microsoft YaHei UI", "Noto Sans SC","PingFang SC",sans-serif !important; }
+.app-header { text-align: center; max-width: 900px; margin: 0 auto 4px !important; padding: 0 !important; }
+.gradio-container { padding-top: 2px !important; padding-bottom: 2px !important; }
+.gradio-container .tabs { margin-top: 0px !important; }
+.gradio-container .tabitem { padding-top: 4px !important; }
+.prompt-grid { gap: 8px !important; margin-top: 4px !important; }
+.prompt-grid button { height: 40px !important; min-height: 0 !important; padding: 0 12px !important; border-radius: 8px !important; font-weight: 600 !important; font-size: 13px !important; letter-spacing: .2px; }
+.quick-links { text-align: center; padding: 8px 0; border: 1px solid #e5e7eb; border-radius: 8px; margin: 8px auto !important; max-width: 900px; }
+.quick-links a { margin: 0 15px; font-size: 14px; font-weight: 600; text-decoration: none; color: #3b82f6; }
+.quick-links a:hover { text-decoration: underline; }
+#image_preview_vl, #image_preview_doc { height: 60vh !important; overflow: auto; }
+#vis_image_doc { height: 42vh !important; }
+#image_preview_vl .uploaded-image, #image_preview_doc .uploaded-image { height: 100%; }
+#image_preview_vl img, #image_preview_doc img, #vis_image_doc img { width: 100% !important; height: 100% !important; object-fit: contain !important; }
+#md_preview_vl, #md_preview_doc { max-height: 60vh; overflow: auto; scrollbar-gutter: stable both-edges; }
+#md_preview_doc .prose img,
+#md_preview_vl .prose img {
+    display: block !important;
+    margin-left: auto !important;
+    margin-right: auto !important; /* center block-level images with margin auto */
+    height: auto;                   /* optional: preserve aspect ratio */
+    max-width: 100%;                /* optional: avoid overflow */
+}
+#md_preview_vl .prose, #md_preview_doc .prose { line-height: 1.7 !important; font-family: 'Microsoft YaHei','Noto Sans SC','PingFang SC',sans-serif !important; }
+'''
+
+with gr.Blocks(head=GOOGLE_FONTS_URL, css=custom_css, theme=gr.themes.Soft()) as demo:
+    logo_data_url = image_to_base64_data_url(LOGO_IMAGE_PATH) if os.path.exists(LOGO_IMAGE_PATH) else ""
+    gr.HTML(f"""
+    <div class="app-header">
+        <img src="{logo_data_url}" alt="App Logo" style="max-height:10%; width: auto; margin: 10px auto; display: block;">
+    </div>
+    """)
+    gr.HTML("""
+    <div class="quick-links">
+        <a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">GitHub</a> |
+        <a href="https://github.com/PaddlePaddle/PaddleOCR/blob/main/ppstructure/docs/vls.md" target="_blank">Technical Report</a> |
+        <a href="https://xinghe.baidu.com/" target="_blank">Model</a>
+    </div>
+    """)
+
+    with gr.Tabs():
+        with gr.Tab("Document Parsing"):
+            with gr.Row():
+                with gr.Column(scale=5):
+                    file_doc = gr.File(label="Upload Image", file_count="single", type="filepath", file_types=["image"])
+                    preview_doc_html = gr.HTML(value="", elem_id="image_preview_doc", visible=False)
+
+                    gr.Markdown("_(Use this mode for recognizing full-page documents with structured layouts, such as reports, papers, or magazines.)_")
+                    gr.Markdown("💡 *To recognize a single, pre-cropped element (e.g., a table or formula), switch to the 'Content Recognition' tab for better results.*")
+
+                    with gr.Row(variant="panel"):
+                        chart_parsing_switch = gr.Checkbox(label="Enable chart parsing", value=False, scale=1)
+                        btn_parse = gr.Button("Parse Document", variant="primary", scale=2)
+
+                    if complex_document_examples:
+                        complex_paths = [e[0] for e in complex_document_examples]
+                        complex_state = gr.State(complex_paths)
+                        gr.Markdown("**Document Examples (Click an image to load)**")
+                        gallery_complex = gr.Gallery(value=complex_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
+                        gallery_complex.select(fn=_on_gallery_select, inputs=[complex_state], outputs=[file_doc])
+
+                with gr.Column(scale=7):
+                    with gr.Tabs():
+                        with gr.Tab("Markdown Preview"):
+                            md_preview_doc = gr.Markdown("Please upload an image and click 'Parse Document'.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_doc")
+                        with gr.Tab("Visualization"):
+                            vis_image_doc = gr.Image(label="Detection Visualization", interactive=False, elem_id="vis_image_doc")
+                        with gr.Tab("Markdown Source"):
+                            md_raw_doc = gr.Code(label="Markdown Source Code", language="markdown")
+
+            file_doc.change(fn=update_preview_visibility, inputs=[file_doc], outputs=[preview_doc_html])
+            btn_parse.click(fn=handle_complex_doc, inputs=[file_doc, chart_parsing_switch], outputs=[md_preview_doc, vis_image_doc, md_raw_doc])
+
+        with gr.Tab("Content Recognition"):
+            with gr.Row():
+                with gr.Column(scale=5):
+                    file_vl = gr.File(label="Upload Image", file_count="single", type="filepath", file_types=["image"])
+                    preview_vl_html = gr.HTML(value="", elem_id="image_preview_vl", visible=False)
+
+                    gr.Markdown("_(Best for images with a **simple, single-column layout** (e.g., pure text), or for a **pre-cropped single element** like a table, formula, or chart.)_")
+                    gr.Markdown("Choose a recognition type:")
+                    with gr.Row(elem_classes=["prompt-grid"]):
+                        btn_ocr = gr.Button("Text Recognition", variant="secondary")
+                        btn_formula = gr.Button("Formula Recognition", variant="secondary")
+                    with gr.Row(elem_classes=["prompt-grid"]):
+                        btn_table = gr.Button("Table Recognition", variant="secondary")
+                        btn_chart = gr.Button("Chart Recognition", variant="secondary")
+
+                    if targeted_recognition_examples:
+                        targeted_paths = [e[0] for e in targeted_recognition_examples]
+                        targeted_state = gr.State(targeted_paths)
+                        gr.Markdown("**Content Recognition Examples (Click an image to load)**")
+                        gallery_targeted = gr.Gallery(value=targeted_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
+                        gallery_targeted.select(fn=_on_gallery_select, inputs=[targeted_state], outputs=[file_vl])
+
+                with gr.Column(scale=7):
+                    with gr.Tabs():
+                        with gr.Tab("Recognition Result"):
+                            md_preview_vl = gr.Markdown("Please upload an image and click a recognition type.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_vl")
+                        with gr.Tab("Raw Output"):
+                            md_raw_vl = gr.Code(label="Raw Output", language="markdown")
+
+            file_vl.change(fn=update_preview_visibility, inputs=[file_vl], outputs=[preview_vl_html])
+            btn_ocr.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Text Recognition")], outputs=[md_preview_vl, md_raw_vl])
+            btn_formula.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Formula Recognition")], outputs=[md_preview_vl, md_raw_vl])
+            btn_table.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Table Recognition")], outputs=[md_preview_vl, md_raw_vl])
+            btn_chart.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Chart Recognition")], outputs=[md_preview_vl, md_raw_vl])
+
+if __name__ == "__main__":
+    demo.queue()
+    demo.launch(server_name="0.0.0.0", server_port=8812, share=False)
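
Note: for exercising the service outside the Gradio UI, the sketch below sends the same request that _call_api builds for full document parsing and prints the returned Markdown. It assumes only what app.py above shows (the payload keys "file", "fileType", "useLayoutDetection", "layoutMergeBboxesMode" and the errorCode / result.layoutParsingResults response shape); the endpoint still comes from the API_URL environment variable, and the script itself is illustrative rather than part of this commit.

# Minimal command-line sketch of the request app.py's _call_api sends for
# "Document Parsing". Assumes the service contract visible in the code above.
import base64
import os
import sys

import requests

def parse_document(image_path: str, api_url: str) -> str:
    with open(image_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode("utf-8")
    payload = {
        "file": b64,
        "fileType": 1,               # 1 = image, matching _file_to_b64_image_only
        "useLayoutDetection": True,  # full document parsing, as in handle_complex_doc
        "layoutMergeBboxesMode": "union",
    }
    resp = requests.post(api_url, json=payload, timeout=120)
    resp.raise_for_status()
    data = resp.json()
    if data.get("errorCode", -1) != 0:
        raise RuntimeError(f"API error: {data.get('errorMsg')}")
    pages = data.get("result", {}).get("layoutParsingResults", [])
    return (pages[0].get("markdown", {}) or {}).get("text", "") if pages else ""

if __name__ == "__main__":
    print(parse_document(sys.argv[1], os.environ["API_URL"]))

With API_URL exported, running the sketch against one of the example images below should print the same Markdown that the "Markdown Source" tab displays.
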
assets/logo.jpg ADDED

Git LFS Details

  • SHA256: 8b8529252ba0bfc47ab937377f401052a671050914b48445323d2f93a17f3e6d
  • Pointer size: 131 Bytes
  • Size of remote file: 269 kB
examples/complex/0a79472d2d5a4eb665f03577ffd13e86.png ADDED

Git LFS Details

  • SHA256: 5fbc113589936122749b4e959eab54cd46738aa98fde2113871066737b7998d4
  • Pointer size: 131 Bytes
  • Size of remote file: 458 kB
examples/complex/0aaee917395d2d4359d50c56ef5f0ab7.png ADDED

Git LFS Details

  • SHA256: fbcc15c6e4cda4a682055746f41e2b845624dbcbc3d2877a6cbb107d6cf061b0
  • Pointer size: 131 Bytes
  • Size of remote file: 494 kB
examples/complex/0b9ec3d03f25f90250e40c26e1ea6846.jpg ADDED

Git LFS Details

  • SHA256: 5d6790f4c6f1a8b9f07773aec52a1aca74d42fa7c794a1475b5c73424425a19e
  • Pointer size: 131 Bytes
  • Size of remote file: 700 kB
examples/complex/247c8f905dd90644643d69032acc8643.png ADDED

Git LFS Details

  • SHA256: b003b414676f84b71f71c6223b7ecafd744de0bcd6a328b29e0bbbb6599b7e87
  • Pointer size: 131 Bytes
  • Size of remote file: 864 kB
examples/complex/699e8d54240058cb198fc2c8dd636cf4.jpg ADDED

Git LFS Details

  • SHA256: c48fac5e2c699f8c1776964d46c5389a532f62b24b659ee7ac5d1e0fc511c45c
  • Pointer size: 130 Bytes
  • Size of remote file: 79.1 kB
examples/complex/84336c1785bc1230add6adfd0665e83a.png ADDED

Git LFS Details

  • SHA256: c5c98e4f9b59f86dc0506d0cf64368ca3e3a84076b34ee056382e8b894c3cf07
  • Pointer size: 131 Bytes
  • Size of remote file: 101 kB
examples/complex/894a195916b499a9324ce554cae66cfc.png ADDED

Git LFS Details

  • SHA256: 5cfdf0ad976961d8d60940a1fe0932879b8937deb11df9ad44a588a995f6918b
  • Pointer size: 131 Bytes
  • Size of remote file: 561 kB
examples/complex/afdd085ad67267e67d9c92647aade7d1.png ADDED

Git LFS Details

  • SHA256: 9030e8919c94b76a06f64a03f640ba0d0efce537150bda133ff0fc95cfe9950a
  • Pointer size: 131 Bytes
  • Size of remote file: 584 kB
examples/complex/c356e25f0b642cdfdabdcc79ef892407.png ADDED

Git LFS Details

  • SHA256: 5da7a14d88252286a458ac1b97b4b2b2c3499df8b2695f11c6f7ef130d6e2054
  • Pointer size: 131 Bytes
  • Size of remote file: 460 kB
examples/complex/d47254fbf3f70c3052a2299f57f99d6e.jpg ADDED

Git LFS Details

  • SHA256: 4337da8eed767e5bed3411813a2a0221b7c3dee08bc40149c0fa81d911139c77
  • Pointer size: 131 Bytes
  • Size of remote file: 379 kB
examples/complex/e5fe1dd2626943aaac7cacc98ff9a82b.png ADDED

Git LFS Details

  • SHA256: 8120ac17b53ea03bc638ed34a5982c3a65d290799fb0a6bcb8179422e693749d
  • Pointer size: 131 Bytes
  • Size of remote file: 888 kB
examples/targeted/06ccb40671a1ad02c788c815a9f407c7.jpg ADDED

Git LFS Details

  • SHA256: 018c4cf57d8d647aa09f95e52886616f82f3645d49ea36a1023a34837072d837
  • Pointer size: 131 Bytes
  • Size of remote file: 240 kB
examples/targeted/3920c9f7ce52d3558210722c6482295d.png ADDED

Git LFS Details

  • SHA256: b564c3874fce1c1dc1ab9a72672a04c49f48dce2f220e7106b33a82fcfd52465
  • Pointer size: 131 Bytes
  • Size of remote file: 696 kB
examples/targeted/45799efd860556b6a9d0b222c700439e.jpg ADDED

Git LFS Details

  • SHA256: 9122b291e49168aef4976226ed22f1212867dfc94349831d1593a334cbf77bd8
  • Pointer size: 131 Bytes
  • Size of remote file: 444 kB
examples/targeted/551cef7cc194876b37b2dd940e67da8f.jpg ADDED

Git LFS Details

  • SHA256: c4d8b73b45295b60e302db249294e303f13ffdcd033ae29ffb3b86f15c14c6db
  • Pointer size: 131 Bytes
  • Size of remote file: 309 kB
examples/targeted/787acdca195d9cc6d171aa6383451973.png ADDED

Git LFS Details

  • SHA256: 536f16b1af383a0941255ad67759cc55f2c69daa5a3f1807ef7f77726e4f69dd
  • Pointer size: 131 Bytes
  • Size of remote file: 120 kB
examples/targeted/d4c059f384cd057f0d9aa9efd1fd9655.jpg ADDED

Git LFS Details

  • SHA256: 4c78abbc1f43ef5c97541c95f29e53db2f81338c94ebd6f079b942d8ea0b538e
  • Pointer size: 131 Bytes
  • Size of remote file: 685 kB
examples/targeted/e5829e2de95961b2cf97e1c86b21d880.png ADDED

Git LFS Details

  • SHA256: 9557993dc5b11c318466d393c497b80d15e2a323e68d1d4f9fef8018227cc681
  • Pointer size: 131 Bytes
  • Size of remote file: 394 kB
examples/targeted/formula_1.png ADDED

Git LFS Details

  • SHA256: 5c55b23c950b246f6a927878bdc737cc1508a9cf099cb7819ec86e8e5fa6f8c0
  • Pointer size: 130 Bytes
  • Size of remote file: 74.3 kB
examples/targeted/formula_2.png ADDED

Git LFS Details

  • SHA256: 46b450f025487b457d89d131821f353a5433fc3b346219a90d925db7755e9e12
  • Pointer size: 130 Bytes
  • Size of remote file: 49.2 kB
examples/targeted/ocr_1.png ADDED

Git LFS Details

  • SHA256: 9f59b3dcf2022e0ca76eb218b6c0ce1f05420b11b35db1f8d3aab650b7ebcd20
  • Pointer size: 131 Bytes
  • Size of remote file: 405 kB
examples/targeted/ocr_2.png ADDED

Git LFS Details

  • SHA256: 62c972b9d5f26043f32e3d7c6d50b748b1b631eabdf0c62b3ccdd1416dfdad51
  • Pointer size: 131 Bytes
  • Size of remote file: 301 kB
examples/targeted/ocr_3.png ADDED

Git LFS Details

  • SHA256: 94d0d87bee90b7e3544590a1b0e3892845c80b0c46701ad164d9d2c402184a1a
  • Pointer size: 131 Bytes
  • Size of remote file: 188 kB
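
Note: the SHA256 values listed above are the Git LFS object IDs, i.e. the SHA-256 digests of the file contents. After `git lfs pull`, a checked-out file can be compared against its listed hash with a few lines of Python; the expected value below is the one listed for assets/logo.jpg, and the helper itself is illustrative only.

# Optional local check: hash a checked-out LFS file and compare it with the
# SHA256 shown in its "Git LFS Details" entry above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "8b8529252ba0bfc47ab937377f401052a671050914b48445323d2f93a17f3e6d"  # assets/logo.jpg
print(sha256_of("assets/logo.jpg") == expected)
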