import gradio as gr
from transformers import AutoModel, AutoTokenizer
import torch
import spaces
import os
import sys
import tempfile
import shutil
from PIL import Image, ImageDraw, ImageFont, ImageOps
import fitz
import re
import ast
import warnings
import numpy as np
import base64
from io import StringIO, BytesIO

# Model path configuration
# Option 1: use the online model (default)
MODEL_PATH = 'deepseek-ai/DeepSeek-OCR'

# Option 2: use a locally downloaded model (recommended)
# After downloading the model, point MODEL_PATH at its local directory, e.g.:
# MODEL_PATH = './models/DeepSeek-OCR'  # local model path
# MODEL_PATH = 'E:/hugging_face/models/DeepSeek-OCR'  # or an absolute path

# Fall back to the online model if the local path does not exist
if not os.path.exists(MODEL_PATH):
    print(f"Local model path not found: {MODEL_PATH}")
    print("Using the online model: deepseek-ai/DeepSeek-OCR")
    MODEL_PATH = 'deepseek-ai/DeepSeek-OCR'
else:
    print(f"Using local model: {MODEL_PATH}")

# Auto-detect device (GPU if available, else CPU)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
print(f"使用设备: {device}, 数据类型: {torch_dtype}")

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, trust_remote_code=True)

# Load the model
if device == "cpu":
    # CPU mode: use float32 to avoid dtype mismatches
    print("⚠️  CPU mode: forcing float32 (bfloat16 is not fully supported on CPU)")
    model = AutoModel.from_pretrained(
        MODEL_PATH,
        trust_remote_code=True,
        use_safetensors=True,
        torch_dtype=torch.float32,  # CPU must use float32
        low_cpu_mem_usage=True
    )
    model = model.eval().float()  # make sure every parameter is float32
else:
    # GPU mode: bfloat16 is supported
    model = AutoModel.from_pretrained(
        MODEL_PATH, 
        trust_remote_code=True, 
        use_safetensors=True,
        torch_dtype=torch.bfloat16
    )
    model = model.eval().to(device)

# Create a device-compatible wrapper around the model's infer method
original_infer = model.infer

def device_compatible_infer(*args, **kwargs):
    """设备兼容的推理包装器,支持 CPU/GPU 自动切换"""
    import torch
    
    # Temporarily patch torch.cuda.is_available and related methods
    old_is_available = torch.cuda.is_available
    old_cuda_method = None
    old_float_tensor = None
    
    try:
        # In CPU mode, intercept CUDA calls
        if device == "cpu":
            torch.cuda.is_available = lambda: False
            
            # Patch the tensor.cuda() method
            def cpu_wrapper(self, *args, **kwargs):
                # Make sure the result is float32
                result = self.cpu()
                if result.dtype == torch.bfloat16:
                    result = result.float()
                return result
            
            # Save the original method
            if hasattr(torch.Tensor, '_original_cuda'):
                old_cuda_method = torch.Tensor._original_cuda
            else:
                old_cuda_method = torch.Tensor.cuda
                torch.Tensor._original_cuda = old_cuda_method
            
            torch.Tensor.cuda = cpu_wrapper
            
            # Patch torch.cuda.FloatTensor
            old_float_tensor = torch.cuda.FloatTensor
            torch.cuda.FloatTensor = torch.FloatTensor
        
        # Call the original infer method
        return original_infer(*args, **kwargs)
    
    finally:
        # Restore the original methods
        torch.cuda.is_available = old_is_available
        if old_cuda_method is not None:
            torch.Tensor.cuda = old_cuda_method
        if old_float_tensor is not None:
            torch.cuda.FloatTensor = old_float_tensor

# Replace the model's infer method with the wrapper
model.infer = device_compatible_infer

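# Resolution presets: base_size sets the global view resolution, image_size the tile
# resolution, and crop_mode enables tiled cropping for large documents.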
MODEL_CONFIGS = {
    "⚡ Gundam": {"base_size": 1024, "image_size": 640, "crop_mode": True},
    "🚀 Tiny": {"base_size": 512, "image_size": 512, "crop_mode": False},
    "📄 Small": {"base_size": 640, "image_size": 640, "crop_mode": False},
    "📊 Base": {"base_size": 1024, "image_size": 1024, "crop_mode": False},
    "🎯 Large": {"base_size": 1280, "image_size": 1280, "crop_mode": False}
}

TASK_PROMPTS = {
    "📋 Markdown": {"prompt": "<image>\n<|grounding|>Convert the document to markdown.", "has_grounding": True},
    "📝 Free OCR": {"prompt": "<image>\nFree OCR.", "has_grounding": False},
    "📍 Locate": {"prompt": "<image>\nLocate <|ref|>text<|/ref|> in the image.", "has_grounding": True},
    "🔍 Describe": {"prompt": "<image>\nDescribe this image in detail.", "has_grounding": False},
    "✏️ Custom": {"prompt": "", "has_grounding": False}
}


def extract_grounding_references(text):
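    """Return (full_match, label, coords) tuples for every grounding reference in the model output."""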
    pattern = r'(<\|ref\|>(.*?)<\|/ref\|><\|det\|>(.*?)<\|/det\|>)'
    return re.findall(pattern, text, re.DOTALL)


def draw_bounding_boxes(image, refs, extract_images=False):
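    """Draw labeled bounding boxes for each grounding reference.

    Coordinates are normalized to a 0-999 grid and scaled to the image size. Returns the
    annotated image and, when extract_images is True, crops of regions labeled 'image'.
    """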
    img_w, img_h = image.size
    img_draw = image.copy()
    draw = ImageDraw.Draw(img_draw)
    overlay = Image.new('RGBA', img_draw.size, (0, 0, 0, 0))
    draw2 = ImageDraw.Draw(overlay)
    font = ImageFont.load_default()
    crops = []

    for ref in refs:
        label = ref[1]
        coords = ast.literal_eval(ref[2])  # safely parse the coordinate list instead of eval'ing model output
        color = (np.random.randint(50, 255), np.random.randint(
            50, 255), np.random.randint(50, 255))
        color_a = color + (60,)

        for box in coords:
            x1, y1, x2, y2 = int(
                box[0]/999*img_w), int(box[1]/999*img_h), int(box[2]/999*img_w), int(box[3]/999*img_h)

            if extract_images and label == 'image':
                crops.append(image.crop((x1, y1, x2, y2)))

            width = 5 if label == 'title' else 3
            draw.rectangle([x1, y1, x2, y2], outline=color, width=width)
            draw2.rectangle([x1, y1, x2, y2], fill=color_a)

            text_bbox = draw.textbbox((0, 0), label, font=font)
            tw, th = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]
            ty = max(0, y1 - 20)
            draw.rectangle([x1, ty, x1 + tw + 4, ty + th + 4], fill=color)
            draw.text((x1 + 2, ty + 2), label, font=font, fill=(255, 255, 255))

    img_draw.paste(overlay, (0, 0), overlay)
    return img_draw, crops


def clean_output(text, include_images=False, remove_labels=False):
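    """Strip grounding tags from the raw output.

    Image references become '**[Figure N]**' placeholders when include_images is True;
    other references are replaced by their text, or dropped when remove_labels is True.
    """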
    if not text:
        return ""
    pattern = r'(<\|ref\|>(.*?)<\|/ref\|><\|det\|>(.*?)<\|/det\|>)'
    matches = re.findall(pattern, text, re.DOTALL)
    img_num = 0

    for match in matches:
        if '<|ref|>image<|/ref|>' in match[0]:
            if include_images:
                text = text.replace(
                    match[0], f'\n\n**[Figure {img_num + 1}]**\n\n', 1)
                img_num += 1
            else:
                text = text.replace(match[0], '', 1)
        else:
            if remove_labels:
                text = text.replace(match[0], '', 1)
            else:
                text = text.replace(match[0], match[1], 1)

    return text.strip()


def embed_images(markdown, crops):
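    """Replace '**[Figure N]**' placeholders with the cropped images as inline base64 PNGs."""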
    if not crops:
        return markdown
    for i, img in enumerate(crops):
        buf = BytesIO()
        img.save(buf, format="PNG")
        b64 = base64.b64encode(buf.getvalue()).decode()
        markdown = markdown.replace(
            f'**[Figure {i + 1}]**', f'\n\n![Figure {i + 1}](data:image/png;base64,{b64})\n\n', 1)
    return markdown


@spaces.GPU(duration=60)
def process_image(image, mode, task, custom_prompt):
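    """Run OCR on a single image and return (text, markdown, raw output, annotated image, crops)."""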
    if image is None:
        return "Error: upload an image", "", "", None, []
    if task in ["✏️ Custom", "📍 Locate"] and not custom_prompt.strip():
        return "Error: enter a prompt", "", "", None, []

    if image.mode in ('RGBA', 'LA', 'P'):
        image = image.convert('RGB')
    image = ImageOps.exif_transpose(image)

    config = MODEL_CONFIGS[mode]

    if task == "✏️ Custom":
        prompt = f"<image>\n{custom_prompt.strip()}"
        has_grounding = '<|grounding|>' in custom_prompt
    elif task == "📍 Locate":
        prompt = f"<image>\nLocate <|ref|>{custom_prompt.strip()}<|/ref|> in the image."
        has_grounding = True
    else:
        prompt = TASK_PROMPTS[task]["prompt"]
        has_grounding = TASK_PROMPTS[task]["has_grounding"]

    tmp = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
    image.save(tmp.name, 'JPEG', quality=95)
    tmp.close()
    out_dir = tempfile.mkdtemp()

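    # model.infer prints its result to stdout, so capture it and filter out progress/debug lines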
    stdout = sys.stdout
    sys.stdout = StringIO()

    model.infer(tokenizer=tokenizer, prompt=prompt, image_file=tmp.name, output_path=out_dir,
                base_size=config["base_size"], image_size=config["image_size"], crop_mode=config["crop_mode"])

    result = '\n'.join([l for l in sys.stdout.getvalue().split('\n')
                        if not any(s in l for s in ['image:', 'other:', 'PATCHES', '====', 'BASE:', '%|', 'torch.Size'])]).strip()
    sys.stdout = stdout

    os.unlink(tmp.name)
    shutil.rmtree(out_dir, ignore_errors=True)

    if not result:
        return "No text", "", "", None, []

    cleaned = clean_output(result, False, False)
    markdown = clean_output(result, True, True)

    img_out = None
    crops = []

    if has_grounding and '<|ref|>' in result:
        refs = extract_grounding_references(result)
        if refs:
            img_out, crops = draw_bounding_boxes(image, refs, True)

    markdown = embed_images(markdown, crops)

    return cleaned, markdown, result, img_out, crops


@spaces.GPU(duration=300)
def process_pdf(path, mode, task, custom_prompt):
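    """Render each PDF page at 300 DPI and OCR it with process_image, concatenating per-page results."""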
    doc = fitz.open(path)
    texts, markdowns, raws, all_crops = [], [], [], []

    for i in range(len(doc)):
        page = doc.load_page(i)
        pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72), alpha=False)
        img = Image.open(BytesIO(pix.tobytes("png")))

        text, md, raw, _, crops = process_image(img, mode, task, custom_prompt)

        if text and text != "No text":
            texts.append(f"### Page {i + 1}\n\n{text}")
            markdowns.append(f"### Page {i + 1}\n\n{md}")
            raws.append(f"=== Page {i + 1} ===\n{raw}")
            all_crops.extend(crops)

    doc.close()

    return ("\n\n---\n\n".join(texts) if texts else "No text in PDF",
            "\n\n---\n\n".join(markdowns) if markdowns else "No text in PDF",
            "\n\n".join(raws), None, all_crops)


def process_file(path, mode, task, custom_prompt):
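    """Dispatch PDFs to process_pdf and everything else to process_image."""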
    if not path:
        return "Error: upload a file", "", "", None, []

    if path.lower().endswith('.pdf'):
        return process_pdf(path, mode, task, custom_prompt)
    else:
        return process_image(Image.open(path), mode, task, custom_prompt)


def toggle_prompt(task):
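    """Show the prompt textbox only for tasks that need extra input (Custom and Locate)."""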
    if task == "✏️ Custom":
        return gr.update(visible=True, label="Custom Prompt", placeholder="Add <|grounding|> for boxes")
    elif task == "📍 Locate":
        return gr.update(visible=True, label="Text to Locate", placeholder="Enter text")
    return gr.update(visible=False)


def load_image(file_path):
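    """Load the uploaded file for preview: the first page at 300 DPI for PDFs, the image itself otherwise."""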
    if not file_path:
        return None
    if file_path.lower().endswith('.pdf'):
        doc = fitz.open(file_path)
        page = doc.load_page(0)
        pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72), alpha=False)
        img = Image.open(BytesIO(pix.tobytes("png")))
        doc.close()
        return img
    else:
        return Image.open(file_path)


with gr.Blocks(theme=gr.themes.Soft(), title="DeepSeek-OCR") as demo:
    gr.Markdown("""
    # 🚀 DeepSeek-OCR Demo
    **Convert documents to markdown, extract raw text, and locate specific content with bounding boxes. See the Info section at the bottom of the page for details.**
    
    **Hope this tool was helpful! If so, a quick like ❤️ would mean a lot :)**
    """)

    with gr.Row():
        with gr.Column(scale=1):
            file_in = gr.File(label="Upload Image or PDF", file_types=[
                              "image", ".pdf"], type="filepath")
            input_img = gr.Image(label="Input Image", type="pil", height=300)
            mode = gr.Dropdown(list(MODEL_CONFIGS.keys()),
                               value="⚡ Gundam", label="Mode")
            task = gr.Dropdown(list(TASK_PROMPTS.keys()),
                               value="📋 Markdown", label="Task")
            prompt = gr.Textbox(label="Prompt", lines=2, visible=False)
            btn = gr.Button("Extract", variant="primary", size="lg")

        with gr.Column(scale=2):
            with gr.Tabs():
                with gr.Tab("📝 Text"):
                    text_out = gr.Textbox(
                        lines=20, show_copy_button=True, show_label=False)
                with gr.Tab("🎨 Markdown"):
                    md_out = gr.Markdown("")
                with gr.Tab("🖼️ Boxes"):
                    img_out = gr.Image(
                        type="pil", height=500, show_label=False)
                with gr.Tab("🖼️ Cropped Images"):
                    gallery = gr.Gallery(
                        show_label=False, columns=3, height=400)
                with gr.Tab("🔍 Raw"):
                    raw_out = gr.Textbox(
                        lines=20, show_copy_button=True, show_label=False)

    gr.Examples(
        examples=[
            ["examples/ocr.jpg", "⚡ Gundam", "📋 Markdown", ""],
            ["examples/reachy-mini.jpg", "⚡ Gundam", "📍 Locate", "Robot"]
        ],
        inputs=[input_img, mode, task, prompt],
        cache_examples=False
    )

    with gr.Accordion("ℹ️ Info", open=False):
        gr.Markdown("""
        ### Modes
        - **Gundam**: 1024 base + 640 tiles with cropping - Best balance
        - **Tiny**: 512×512, no crop - Fastest
        - **Small**: 640×640, no crop - Quick
        - **Base**: 1024×1024, no crop - Standard
        - **Large**: 1280×1280, no crop - Highest quality
        
        ### Tasks
        - **Markdown**: Convert document to structured markdown (grounding ✅)
        - **Free OCR**: Simple text extraction
        - **Locate**: Find specific text in image (grounding ✅)
        - **Describe**: General image description
        - **Custom**: Your own prompt (add `<|grounding|>` for boxes)
        """)

    file_in.change(load_image, [file_in], [input_img])
    task.change(toggle_prompt, [task], [prompt])

    def run(image, file_path, mode, task, custom_prompt):
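        """Prefer the preview image when present; otherwise fall back to the uploaded file path."""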
        if image is not None:
            return process_image(image, mode, task, custom_prompt)
        if file_path:
            return process_file(file_path, mode, task, custom_prompt)
        return "Error uploading file or image", "", "", None, []

    btn.click(run, [input_img, file_in, mode, task, prompt],
              [text_out, md_out, raw_out, img_out, gallery])

if __name__ == "__main__":
    demo.queue(max_size=20).launch()