# NOTE(review): the following stray lines ("root", "add lib", 68676d8) look like
# accidentally pasted VCS/CLI output — commented out so the module stays importable.
# root
# add lib
# 68676d8
import base64
import io
import json
import os
from typing import Dict, List, Tuple, Any, Optional
import time
import requests
from PIL import Image
import gradio as gr
import re
# =========================
# Config
# =========================
# Service endpoint and auth token come from the environment; both are None when
# unset — presumably the deployment always provides them. TODO confirm.
DEFAULT_API_URL = os.environ.get("API_URL")
TOKEN = os.environ.get("TOKEN")
LOGO_IMAGE_PATH = './assets/logo.jpg'
# Injected via gr.Blocks(head=...) so the Noto Sans SC font referenced by the custom CSS loads.
GOOGLE_FONTS_URL = "<link href='https://fonts.googleapis.com/css2?family=Noto+Sans+SC:wght@400;700&display=swap' rel='stylesheet'>"
# Delimiter pairs handed to gr.Markdown(latex_delimiters=...) for LaTeX rendering.
LATEX_DELIMS = [
    {"left": "$$", "right": "$$", "display": True},
    {"left": "$", "right": "$", "display": False},
    {"left": "\\(", "right": "\\)", "display": False},
    {"left": "\\[", "right": "\\]", "display": True},
]
# NOTE(review): "bearer" is lowercase here — presumably the backend accepts it; confirm.
AUTH_HEADER = {"Authorization": f"bearer {TOKEN}"}
JSON_HEADERS = {**AUTH_HEADER, "Content-Type": "application/json"}
# =========================
# Base64 and Example Loading Logic
# =========================
def image_to_base64_data_url(filepath: str) -> str:
    """Encode a local image file as a Base64 data URL; return "" on any failure."""
    mime_by_ext = {
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.png': 'image/png',
        '.gif': 'image/gif',
    }
    try:
        suffix = os.path.splitext(filepath)[1].lower()
        mime = mime_by_ext.get(suffix, 'image/jpeg')  # default mirrors the common case
        with open(filepath, "rb") as fh:
            payload = base64.b64encode(fh.read()).decode("utf-8")
    except Exception as e:
        # Best-effort: a missing/unreadable file just yields no preview.
        print(f"Error encoding image to Base64: {e}")
        return ""
    return f"data:{mime};base64,{payload}"
def _escape_inequalities_in_math(md: str) -> str:
"""
Finds math blocks in a Markdown string and replaces < and > with
their LaTeX equivalents, \lt and \gt, to prevent markdown parsing errors.
"""
_MATH_PATTERNS = [
re.compile(r"\$\$([\s\S]+?)\$\$"),
re.compile(r"\$([^\$]+?)\$"),
re.compile(r"\\\[([\s\S]+?)\\\]"),
re.compile(r"\\\(([\s\S]+?)\\\)"),
]
def fix(s: str) -> str:
s = s.replace("<=", r" \le ").replace(">=", r" \ge ")
s = s.replace("≤", r" \le ").replace("≥", r" \ge ")
s = s.replace("<", r" \lt ").replace(">", r" \gt ")
return s
for pat in _MATH_PATTERNS:
md = pat.sub(lambda m: m.group(0).replace(m.group(1), fix(m.group(1))), md)
return md
def _get_examples_from_dir(dir_path: str) -> List[List[str]]:
supported_exts = {".png", ".jpg", ".jpeg", ".bmp", ".webp"}
examples = []
if not os.path.exists(dir_path): return []
for filename in sorted(os.listdir(dir_path)):
if os.path.splitext(filename)[1].lower() in supported_exts:
examples.append([os.path.join(dir_path, filename)])
return examples
# Example images shipped with the app; an empty list simply hides the gallery in the UI.
TARGETED_EXAMPLES_DIR = "examples/targeted"
COMPLEX_EXAMPLES_DIR = "examples/complex"
targeted_recognition_examples = _get_examples_from_dir(TARGETED_EXAMPLES_DIR)
complex_document_examples = _get_examples_from_dir(COMPLEX_EXAMPLES_DIR)
# =========================
# UI Helpers
# =========================
def render_uploaded_image_div(file_path: str) -> str:
    """Wrap the uploaded image (inlined as a Base64 data URL) in the preview <div> markup."""
    src = image_to_base64_data_url(file_path)
    return f"""
    <div class="uploaded-image">
        <img src="{src}" alt="Uploaded image" style="width:100%;height:100%;object-fit:contain;"/>
    </div>
    """
def update_preview_visibility(file_path: Optional[str]) -> Dict:
    """Show the HTML preview when a file is selected; hide and clear it otherwise."""
    if not file_path:
        return gr.update(value="", visible=False)
    return gr.update(value=render_uploaded_image_div(file_path), visible=True)
def _on_gallery_select(example_paths: List[str], evt: gr.SelectData):
    """
    Return the file path of the gallery item the user clicked, or None when the
    event/index is unusable.

    Bug fix: the original bare `except Exception` could mask unrelated
    programming errors; only the failures a bad event can actually cause
    are swallowed now.
    """
    try:
        return example_paths[evt.index]
    except (AttributeError, IndexError, TypeError):
        # Defensive: a malformed event or stale index should not crash the UI.
        return None
# =========================
# API Call Logic
# =========================
def _file_to_b64_image_only(file_path: str) -> Tuple[str, int]:
if not file_path: raise ValueError("Please upload an image first.")
ext = os.path.splitext(file_path)[1].lower()
if ext not in {".png", ".jpg", ".jpeg", ".bmp", ".webp"}: raise ValueError("Only image files are supported.")
with open(file_path, "rb") as f:
return base64.b64encode(f.read()).decode("utf-8"), 1
def _call_api(api_url: str, file_path: str, use_layout_detection: bool,
              prompt_label: Optional[str], use_chart_recognition: bool = False) -> Dict[str, Any]:
    """
    POST the image to the OCR service and return the decoded JSON response.

    Args:
        api_url: Endpoint to send the request to.
        file_path: Local path of the image to recognize.
        use_layout_detection: True for full-document parsing, False for element-level recognition.
        prompt_label: Recognition type ("ocr"/"formula"/"table"/"chart"); required when
            layout detection is off.
        use_chart_recognition: Enable chart parsing; only honored in layout-detection mode.

    Raises:
        ValueError: prompt_label missing in element-level mode.
        gr.Error: transport failure, invalid JSON, or a non-zero service errorCode.
    """
    b64, file_type = _file_to_b64_image_only(file_path)
    payload = {
        "file": b64,
        "useLayoutDetection": bool(use_layout_detection),
        "fileType": file_type,
        "layoutMergeBboxesMode": "union",
    }
    if not use_layout_detection:
        if not prompt_label:
            raise ValueError("Please select a recognition type.")
        payload["promptLabel"] = prompt_label.strip().lower()
    if use_layout_detection and use_chart_recognition:
        payload["useChartRecognition"] = True
    try:
        print(f"Sending API request to {api_url}...")
        start_time = time.time()
        resp = requests.post(api_url, json=payload, headers=JSON_HEADERS, timeout=600)
        duration = time.time() - start_time
        print(f"Received API response in {duration:.2f} seconds.")
        resp.raise_for_status()
        data = resp.json()
    except requests.exceptions.RequestException as e:
        raise gr.Error(f"API request failed:{e}") from e
    except json.JSONDecodeError as e:
        raise gr.Error(f"Invalid JSON response from server:\n{getattr(resp, 'text', '')}") from e
    if data.get("errorCode", -1) != 0:
        # Bug fix: the original message dropped the service's error details entirely.
        raise gr.Error(
            f"API returned an error: {data.get('errorMsg', 'unknown error')} "
            f"(errorCode={data.get('errorCode')})"
        )
    return data
# =========================
# API Response Processing
# =========================
# [Change note]: This function is no longer needed, because we no longer download the URL as a PIL Image object.
# def url_to_pil_image(url: str) -> Optional[Image.Image]:
# """Downloads an image from a URL and returns it as a PIL Image object for the Gradio Image component."""
# if not url or not url.startswith(('http://', 'https://')):
# print(f"Warning: Invalid URL provided for visualization image: {url}")
# return None
# try:
# start_time = time.time()
# response = requests.get(url, timeout=600)
# end_time = time.time()
# print(f"Fetched visualization image from {url} in {end_time - start_time:.2f} seconds.")
#
# response.raise_for_status()
# image_bytes = response.content
# pil_image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
# return pil_image
# except requests.exceptions.RequestException as e:
# print(f"Error fetching visualization image from URL {url}: {e}")
# return None
# except Exception as e:
# print(f"Error processing visualization image from URL {url}: {e}")
# return None
def _process_api_response_page(result: Dict[str, Any]) -> Tuple[str, str, str]:
    """
    Turn the first page of the API response into UI-ready pieces.

    Returns a 3-tuple:
        * Markdown preview text (placeholder image paths swapped for their URLs),
        * HTML string for the visualization panel (an <img> tag or a fallback <p>),
        * the raw Markdown text (same substitutions applied).
    """
    pages = (result or {}).get("layoutParsingResults", [])
    if not pages:
        return "No content was recognized.", "<p>No visualization available.</p>", ""
    first_page = pages[0] or {}

    # Step 1: rewrite Markdown image references to point at the served URLs.
    markdown_block = first_page.get("markdown") or {}
    text = markdown_block.get("text", "") or ""
    image_map = markdown_block.get("images", {})
    if image_map:
        for local_path, remote_url in image_map.items():
            text = text.replace(f'src="{local_path}"', f'src="{remote_url}"')
            text = text.replace(f']({local_path})', f']({remote_url})')

    # Step 2: pick a visualization image URL and wrap it in an <img> tag
    # (styled by the `#vis_image_doc img` CSS selector).
    vis_html = "<p style='text-align:center; color:#888;'>No visualization image available.</p>"
    raw_outputs = first_page.get("outputImages") or {}
    candidate_urls = [url for _, url in sorted(raw_outputs.items()) if url]
    # Prefer the second image when available — presumably the layout overlay; TODO confirm.
    chosen_url: Optional[str] = None
    if len(candidate_urls) >= 2:
        chosen_url = candidate_urls[1]
    elif candidate_urls:
        chosen_url = candidate_urls[0]
    if chosen_url:
        print(f"Found visualization image URL: {chosen_url}")
        vis_html = f'<img src="{chosen_url}" alt="Detection Visualization">'
    else:
        print("Warning: No visualization image URL found in the API response.")

    text = _escape_inequalities_in_math(text)
    return text or "(Empty result)", vis_html, text
# =========================
# Handlers
# =========================
def handle_complex_doc(file_path: str, use_chart_recognition: bool) -> Tuple[str, str, str]:
    """Full-document parsing handler: returns (markdown preview, visualization HTML, raw markdown)."""
    if not file_path:
        raise gr.Error("Please upload an image first.")
    response = _call_api(
        DEFAULT_API_URL,
        file_path,
        use_layout_detection=True,
        prompt_label=None,
        use_chart_recognition=use_chart_recognition,
    )
    return _process_api_response_page(response.get("result", {}))
def handle_targeted_recognition(file_path: str, prompt_choice: str) -> Tuple[str, str]:
    """Element-level recognition handler: returns (markdown preview, raw markdown)."""
    if not file_path:
        raise gr.Error("Please upload an image first.")
    prompt_map = {
        "Text Recognition": "ocr",
        "Formula Recognition": "formula",
        "Table Recognition": "table",
        "Chart Recognition": "chart",
    }
    data = _call_api(
        DEFAULT_API_URL,
        file_path,
        use_layout_detection=False,
        prompt_label=prompt_map.get(prompt_choice, "ocr"),
    )
    preview, _vis, raw = _process_api_response_page(data.get("result", {}))
    return preview, raw
# =========================
# CSS & UI
# =========================
# App-wide CSS: global font, header/notice layout, preview pane sizing, and the
# `#vis_image_doc img` rule that styles the visualization <img> built by
# _process_api_response_page.
custom_css = """
/* 全局字体 */
body, .gradio-container {
font-family: "Noto Sans SC", "Microsoft YaHei", "PingFang SC", sans-serif;
}
/* ... (rest of the CSS is unchanged) ... */
.app-header { text-align: center; max-width: 900px; margin: 0 auto 8px !important; }
.gradio-container { padding: 4px 0 !important; }
.gradio-container [data-testid="tabs"], .gradio-container .tabs { margin-top: 0 !important; }
.gradio-container [data-testid="tabitem"], .gradio-container .tabitem { padding-top: 4px !important; }
.quick-links { text-align: center; padding: 8px 0; border: 1px solid #e5e7eb; border-radius: 8px; margin: 8px auto; max-width: 900px; }
.quick-links a { margin: 0 12px; font-size: 14px; font-weight: 600; color: #3b82f6; text-decoration: none; }
.quick-links a:hover { text-decoration: underline; }
.prompt-grid { display: flex; flex-wrap: wrap; gap: 8px; margin-top: 6px; }
.prompt-grid button { height: 40px !important; padding: 0 12px !important; border-radius: 8px !important; font-weight: 600 !important; font-size: 13px !important; letter-spacing: 0.2px; }
#image_preview_vl, #image_preview_doc { height: 400px !important; overflow: auto; }
#image_preview_vl img, #image_preview_doc img, #vis_image_doc img { width: 100% !important; height: auto !important; object-fit: contain !important; display: block; }
#md_preview_vl, #md_preview_doc { max-height: 540px; min-height: 180px; overflow: auto; scrollbar-gutter: stable both-edges; }
#md_preview_vl .prose, #md_preview_doc .prose { line-height: 1.7 !important; }
#md_preview_vl .prose img, #md_preview_doc .prose img { display: block; margin: 0 auto; max-width: 100%; height: auto; }
.notice { margin: 8px auto 0; max-width: 900px; padding: 10px 12px; border: 1px solid #e5e7eb; border-radius: 8px; background: #f8fafc; font-size: 14px; line-height: 1.6; }
.notice strong { font-weight: 700; }
.notice a { color: #3b82f6; text-decoration: none; }
.notice a:hover { text-decoration: underline; }
"""
with gr.Blocks(head=GOOGLE_FONTS_URL, css=custom_css, theme=gr.themes.Soft()) as demo:
    # Inline the logo as a Base64 data URL so no static-file route is required.
    logo_data_url = image_to_base64_data_url(LOGO_IMAGE_PATH) if os.path.exists(LOGO_IMAGE_PATH) else ""
    gr.HTML(f"""<div class="app-header"><img src="{logo_data_url}" alt="App Logo" style="max-height:10%; width: auto; margin: 10px auto; display: block;"></div>""")
    gr.HTML("""<div class="notice"><strong>Heads up:</strong> The Hugging Face demo can be slow at times. For a faster experience, please try <a href="https://aistudio.baidu.com/application/detail/98365" target="_blank" rel="noopener noreferrer">Baidu AI Studio</a> or <a href="https://modelscope.cn/studios/PaddlePaddle/PaddleOCR-VL_Online_Demo/summary" target="_blank" rel="noopener noreferrer">ModelScope</a>.</div>""")
    gr.HTML("""<div class="quick-links"><a href="https://github.com/PaddlePaddle/PaddleOCR" target="_blank">GitHub</a> | <a href="https://ernie.baidu.com/blog/publication/PaddleOCR-VL_Technical_Report.pdf" target="_blank">Technical Report</a> | <a href="https://huggingface.co/PaddlePaddle/PaddleOCR-VL" target="_blank">Model</a></div>""")
    with gr.Tabs():
        # --- Tab 1: full-document parsing (layout detection on) ---
        with gr.Tab("Document Parsing"):
            with gr.Row():
                with gr.Column(scale=5):
                    file_doc = gr.File(label="Upload Image", file_count="single", type="filepath", file_types=["image"])
                    preview_doc_html = gr.HTML(value="", elem_id="image_preview_doc", visible=False)
                    gr.Markdown("_( Use this mode for recognizing full-page documents with structured layouts, such as reports, papers, or magazines.)_")
                    gr.Markdown("💡 *To recognize a single, pre-cropped element (e.g., a table or formula), switch to the 'Element-level Recognition' tab for better results.*")
                    with gr.Row(variant="panel"):
                        chart_parsing_switch = gr.Checkbox(label="Enable chart parsing", value=False, scale=1)
                        btn_parse = gr.Button("Parse Document", variant="primary", scale=2)
                    if complex_document_examples:
                        complex_paths = [e[0] for e in complex_document_examples]
                        complex_state = gr.State(complex_paths)
                        gr.Markdown("**Document Examples (Click an image to load)**")
                        gallery_complex = gr.Gallery(value=complex_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
                        gallery_complex.select(fn=_on_gallery_select, inputs=[complex_state], outputs=[file_doc])
                with gr.Column(scale=7):
                    with gr.Tabs():
                        with gr.Tab("Markdown Preview"):
                            md_preview_doc = gr.Markdown("Please upload an image and click 'Parse Document'.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_doc")
                        with gr.Tab("Visualization"):
                            # gr.HTML (not gr.Image): the visualization is an <img> tag pointing at a remote URL.
                            vis_image_doc = gr.HTML(label="Detection Visualization", elem_id="vis_image_doc")
                        with gr.Tab("Markdown Source"):
                            md_raw_doc = gr.Code(label="Markdown Source Code", language="markdown")
            file_doc.change(fn=update_preview_visibility, inputs=[file_doc], outputs=[preview_doc_html])
            btn_parse.click(fn=handle_complex_doc, inputs=[file_doc, chart_parsing_switch], outputs=[md_preview_doc, vis_image_doc, md_raw_doc])
        # --- Tab 2: element-level recognition (layout detection off) ---
        with gr.Tab("Element-level Recognition"):
            with gr.Row():
                with gr.Column(scale=5):
                    file_vl = gr.File(label="Upload Image", file_count="single", type="filepath", file_types=["image"])
                    preview_vl_html = gr.HTML(value="", elem_id="image_preview_vl", visible=False)
                    gr.Markdown("_(Best for images with a **simple, single-column layout** (e.g., pure text), or for a **pre-cropped single element** like a table, formula, or chart.)_")
                    gr.Markdown("Choose a recognition type:")
                    with gr.Row(elem_classes=["prompt-grid"]):
                        btn_ocr = gr.Button("Text Recognition", variant="secondary")
                        # Bug fix: "secondary" was passed positionally, but gr.Button's
                        # `variant` parameter is keyword-only after `value`.
                        btn_formula = gr.Button("Formula Recognition", variant="secondary")
                    with gr.Row(elem_classes=["prompt-grid"]):
                        btn_table = gr.Button("Table Recognition", variant="secondary")
                        btn_chart = gr.Button("Chart Recognition", variant="secondary")
                    if targeted_recognition_examples:
                        targeted_paths = [e[0] for e in targeted_recognition_examples]
                        targeted_state = gr.State(targeted_paths)
                        gr.Markdown("**Element-level Recognition Examples (Click an image to load)**")
                        gallery_targeted = gr.Gallery(value=targeted_paths, columns=4, height=400, preview=False, label=None, allow_preview=False)
                        gallery_targeted.select(fn=_on_gallery_select, inputs=[targeted_state], outputs=[file_vl])
                with gr.Column(scale=7):
                    with gr.Tabs():
                        with gr.Tab("Recognition Result"):
                            md_preview_vl = gr.Markdown("Please upload an image and click a recognition type.", latex_delimiters=LATEX_DELIMS, elem_id="md_preview_vl")
                        with gr.Tab("Raw Output"):
                            md_raw_vl = gr.Code(label="Raw Output", language="markdown")
            file_vl.change(fn=update_preview_visibility, inputs=[file_vl], outputs=[preview_vl_html])
            btn_ocr.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Text Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_formula.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Formula Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_table.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Table Recognition")], outputs=[md_preview_vl, md_raw_vl])
            btn_chart.click(fn=handle_targeted_recognition, inputs=[file_vl, gr.State("Chart Recognition")], outputs=[md_preview_vl, md_raw_vl])
if __name__ == "__main__":
    # PORT is honored for container platforms; falls back to Gradio's default 7860.
    serve_port = int(os.environ.get("PORT", "7860"))
    demo.queue(max_size=6).launch(
        server_name="0.0.0.0",
        server_port=serve_port,
        share=False,
    )