prithivMLmods committed
Commit 81d2b64 · verified · 1 Parent(s): 3af57f5

Update app.py

Files changed (1)
  1. app.py +256 -230

app.py CHANGED
@@ -1,28 +1,57 @@
  import os
  import time
- import threading
  import gradio as gr
- import spaces
  import torch
- import numpy as np
  from PIL import Image
- import cv2
  from transformers import (
      Qwen2_5_VLForConditionalGeneration,
      Qwen2VLForConditionalGeneration,
-     #Glm4vForConditionalGeneration,
      AutoProcessor,
      TextIteratorStreamer,
  )
- from qwen_vl_utils import process_vision_info
  
- # Constants for text generation
- MAX_MAX_NEW_TOKENS = 16384
- DEFAULT_MAX_NEW_TOKENS = 8192
- MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
  
- device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
  
  # Load Camel-Doc-OCR-062825
  MODEL_ID_M = "prithivMLmods/Camel-Doc-OCR-062825"
  processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
@@ -50,252 +79,249 @@ model_s = Qwen2_5_VLForConditionalGeneration.from_pretrained(
      torch_dtype=torch.float16
  ).to(device).eval()
  
- # Load ViLaSR
- MODEL_ID_Y = "inclusionAI/ViLaSR"
- processor_y = AutoProcessor.from_pretrained(MODEL_ID_Y, trust_remote_code=True)
- model_y = Qwen2_5_VLForConditionalGeneration.from_pretrained(
-     MODEL_ID_Y,
      trust_remote_code=True,
-     torch_dtype=torch.float16
- ).to(device).eval()
  
- def downsample_video(video_path):
-     """
-     Downsample a video to evenly spaced frames, returning each as a PIL image with its timestamp.
-     """
-     vidcap = cv2.VideoCapture(video_path)
-     total_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
-     fps = vidcap.get(cv2.CAP_PROP_FPS)
-     frames = []
-     frame_indices = np.linspace(0, total_frames - 1, 10, dtype=int)
-     for i in frame_indices:
-         vidcap.set(cv2.CAP_PROP_POS_FRAMES, i)
-         success, image = vidcap.read()
-         if success:
-             image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-             pil_image = Image.fromarray(image)
-             timestamp = round(i / fps, 2)
-             frames.append((pil_image, timestamp))
-     vidcap.release()
-     return frames
  
- @spaces.GPU
- def generate_image(model_name: str, text: str, image: Image.Image,
-                    max_new_tokens: int = 1024,
-                    temperature: float = 0.6,
-                    top_p: float = 0.9,
-                    top_k: int = 50,
-                    repetition_penalty: float = 1.2):
      """
-     Generate responses using the selected model for image input.
      """
-     if model_name == "Camel-Doc-OCR-062825":
-         processor = processor_m
-         model = model_m
-     elif model_name == "MinerU2.5-2509":
-         processor = processor_t
-         model = model_t
-     elif model_name == "Video-MTR":
-         processor = processor_s
-         model = model_s
-     elif model_name == "ViLaSR-7B":
-         processor = processor_y
-         model = model_y
-     else:
-         yield "Invalid model selected.", "Invalid model selected."
-         return
  
-     if image is None:
-         yield "Please upload an image.", "Please upload an image."
-         return
  
-     messages = [{
-         "role": "user",
-         "content": [
-             {"type": "image", "image": image},
-             {"type": "text", "text": text},
-         ]
-     }]
-     prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-     inputs = processor(
-         text=[prompt_full],
-         images=[image],
-         return_tensors="pt",
-         padding=True,
-         truncation=False,
-         max_length=MAX_INPUT_TOKEN_LENGTH
-     ).to(device)
-     streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
-     generation_kwargs = {**inputs, "streamer": streamer, "max_new_tokens": max_new_tokens}
-     thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
-     thread.start()
-     buffer = ""
-     for new_text in streamer:
-         buffer += new_text
-         time.sleep(0.01)
-         yield buffer, buffer
  
  @spaces.GPU
- def generate_video(model_name: str, text: str, video_path: str,
-                    max_new_tokens: int = 1024,
-                    temperature: float = 0.6,
-                    top_p: float = 0.9,
-                    top_k: int = 50,
-                    repetition_penalty: float = 1.2):
      """
-     Generate responses using the selected model for video input.
      """
-     if model_name == "Camel-Doc-OCR-062825":
-         processor = processor_m
-         model = model_m
-     elif model_name == "MinerU2.5-2509":
-         processor = processor_t
-         model = model_t
-     elif model_name == "Video-MTR":
-         processor = processor_s
-         model = model_s
-     elif model_name == "ViLaSR-7B":
-         processor = processor_y
-         model = model_y
-     else:
-         yield "Invalid model selected.", "Invalid model selected."
          return
- 
-     if video_path is None:
-         yield "Please upload a video.", "Please upload a video."
          return
  
-     frames = downsample_video(video_path)
-     messages = [
-         {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
-         {"role": "user", "content": [{"type": "text", "text": text}]}
-     ]
-     for frame in frames:
-         image, timestamp = frame
-         messages[1]["content"].append({"type": "text", "text": f"Frame {timestamp}:"})
-         messages[1]["content"].append({"type": "image", "image": image})
-     inputs = processor.apply_chat_template(
-         messages,
-         tokenize=True,
-         add_generation_prompt=True,
-         return_dict=True,
-         return_tensors="pt",
-         truncation=False,
-         max_length=MAX_INPUT_TOKEN_LENGTH
-     ).to(device)
      streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
      generation_kwargs = {
-         **inputs,
-         "streamer": streamer,
          "max_new_tokens": max_new_tokens,
-         "do_sample": True,
          "temperature": temperature,
          "top_p": top_p,
          "top_k": top_k,
          "repetition_penalty": repetition_penalty,
      }
-     thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
      thread.start()
      buffer = ""
      for new_text in streamer:
          buffer += new_text
          buffer = buffer.replace("<|im_end|>", "")
          time.sleep(0.01)
-         yield buffer, buffer
- 
- # Define examples for image and video inference
- image_examples = [
-     ["convert this page to doc [text] precisely for markdown.", "images/1.png"],
-     ["explain the movie shot in detail.", "images/5.jpg"],
-     ["convert this page to doc [table] precisely for markdown.", "images/2.png"],
-     ["explain the movie shot in detail.", "images/3.png"],
-     ["fill the correct numbers.", "images/4.png"]
- ]
- 
- video_examples = [
-     ["explain the video in detail.", "videos/b.mp4"],
-     ["explain the ad video in detail.", "videos/a.mp4"]
- ]
- 
- # Updated CSS with model choice highlighting
- css = """
- .submit-btn {
-     background-color: #2980b9 !important;
-     color: white !important;
- }
- .submit-btn:hover {
-     background-color: #3498db !important;
- }
- .canvas-output {
-     border: 2px solid #4682B4;
-     border-radius: 10px;
-     padding: 20px;
- }
- """
- 
- # Create the Gradio Interface
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-     gr.Markdown("# **[Multimodal VLM v1.0](https://huggingface.co/collections/prithivMLmods/multimodal-implementations-67c9982ea04b39f0608badb0)**")
-     with gr.Row():
-         with gr.Column():
-             with gr.Tabs():
-                 with gr.TabItem("Image Inference"):
-                     image_query = gr.Textbox(label="Query Input", placeholder="✦︎ Enter your query here...")
-                     image_upload = gr.Image(type="pil", label="Image", height=290)
-                     image_submit = gr.Button("Submit", elem_classes="submit-btn")
-                     gr.Examples(
-                         examples=image_examples,
-                         inputs=[image_query, image_upload]
-                     )
-                 with gr.TabItem("Video Inference"):
-                     video_query = gr.Textbox(label="Query Input", placeholder="✦︎ Enter your query here...")
-                     video_upload = gr.Video(label="Video", height=290)
-                     video_submit = gr.Button("Submit", elem_classes="submit-btn")
-                     gr.Examples(
-                         examples=video_examples,
-                         inputs=[video_query, video_upload]
-                     )
-             with gr.Accordion("Advanced options", open=False):
-                 max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
-                 temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
-                 top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
-                 top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
-                 repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
-         with gr.Column():
-             with gr.Column(elem_classes="canvas-output"):
-                 gr.Markdown("## Output")
-                 output = gr.Textbox(label="Raw Output Stream", interactive=False, lines=5)
-                 with gr.Accordion("(Result.md)", open=False):
-                     markdown_output = gr.Markdown(label="(Result.md)")
-             model_choice = gr.Radio(
-                 choices=["Camel-Doc-OCR-062825", "MinerU2.5-2509", "Video-MTR", "ViLaSR-7B"],
-                 label="Select Model",
-                 value="Camel-Doc-OCR-062825"
-             )
-             gr.Markdown("**Model Info 💻** | [Report Bug](https://huggingface.co/spaces/prithivMLmods/Multimodal-VLM-v1.0/discussions)")
- 
-     gr.Markdown("> [Camel-Doc-OCR-062825](https://huggingface.co/prithivMLmods/Camel-Doc-OCR-062825) is a Qwen2.5-VL-7B-Instruct finetune, highly optimized for document retrieval, structured extraction, analysis, and direct Markdown generation from images and PDFs.")
-     gr.Markdown("> [MinerU2.5-2509](https://huggingface.co/opendatalab/MinerU2.5-2509-1.2B) is a 1.2B-parameter vision-language model for document parsing that achieves state-of-the-art accuracy with high computational efficiency by adopting a two-stage parsing strategy.")
-     gr.Markdown("> [ViLaSR-7B](https://huggingface.co/inclusionAI/ViLaSR) focuses on reinforcing spatial reasoning in visual-language tasks by combining interwoven thinking with visual drawing, making it especially suited for spatial reasoning and complex tip-based queries.")
-     gr.Markdown("> [Video-MTR](https://huggingface.co/Phoebe13/Video-MTR) introduces reinforced multi-turn reasoning for long-form video understanding, enabling iterative key segment selection and deeper question comprehension.")
- 
-     gr.Markdown("> ✋ ViLaSR-7B - demo only supports text-only reasoning, which doesn't reflect the full behavior of the model and may underrepresent its capabilities.")
-     gr.Markdown("> ⚠️ Note: Models in this space may not perform well on video inference tasks.")
-     # Define the submit button actions
-     image_submit.click(fn=generate_image,
-                        inputs=[
-                            model_choice, image_query, image_upload,
-                            max_new_tokens, temperature, top_p, top_k,
-                            repetition_penalty
-                        ],
-                        outputs=[output, markdown_output])
-     video_submit.click(fn=generate_video,
-                        inputs=[
-                            model_choice, video_query, video_upload,
-                            max_new_tokens, temperature, top_p, top_k,
-                            repetition_penalty
-                        ],
-                        outputs=[output, markdown_output])
  
  if __name__ == "__main__":
-     demo.queue(max_size=40).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)
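Review note: both the removed handlers above and the rewritten process_document_stream below rely on the same streaming pattern: model.generate() runs on a worker thread while a TextIteratorStreamer yields decoded fragments to the caller. A minimal sketch of that pattern, assuming a generic text-only checkpoint (the model id here is a placeholder, not one of the Space's models):

    # Threaded token streaming, as used in app.py (sketch).
    from threading import Thread
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

    model_id = "gpt2"  # placeholder checkpoint for illustration
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)

    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # generate() blocks until completion, so it runs on a background
    # thread while the main thread drains the streamer incrementally.
    thread = Thread(target=model.generate,
                    kwargs={**inputs, "streamer": streamer, "max_new_tokens": 64})
    thread.start()

    buffer = ""
    for fragment in streamer:
        buffer += fragment  # same accumulate-and-yield loop as the handlers
    print(buffer)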
 
 
+ import spaces
+ import json
+ import math
  import os
+ import traceback
+ from io import BytesIO
+ from typing import Any, Dict, List, Optional, Tuple
+ import re
  import time
+ from threading import Thread
+ from io import BytesIO
+ import uuid
+ import tempfile
+ 
  import gradio as gr
+ import requests
  import torch
  from PIL import Image
+ import fitz
+ import numpy as np
+ 
+ 
  from transformers import (
      Qwen2_5_VLForConditionalGeneration,
      Qwen2VLForConditionalGeneration,
+     AutoModelForCausalLM,
      AutoProcessor,
      TextIteratorStreamer,
+     AutoTokenizer,
  )
  
+ from transformers.image_utils import load_image as hf_load_image
  
+ from reportlab.lib.pagesizes import A4
+ from reportlab.lib.styles import getSampleStyleSheet
+ from reportlab.platypus import SimpleDocTemplate, Image as RLImage, Paragraph, Spacer
+ from reportlab.lib.units import inch
  
+ # --- Constants and Model Setup ---
+ MAX_INPUT_TOKEN_LENGTH = 4096
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ 
+ print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
+ print("torch.__version__ =", torch.__version__)
+ print("torch.version.cuda =", torch.version.cuda)
+ print("cuda available:", torch.cuda.is_available())
+ print("cuda device count:", torch.cuda.device_count())
+ if torch.cuda.is_available():
+     print("current device:", torch.cuda.current_device())
+     print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
+ 
+ print("Using device:", device)
+ 
+ # --- Model Loading ---
  # Load Camel-Doc-OCR-062825
  MODEL_ID_M = "prithivMLmods/Camel-Doc-OCR-062825"
  processor_m = AutoProcessor.from_pretrained(MODEL_ID_M, trust_remote_code=True)
  
      torch_dtype=torch.float16
  ).to(device).eval()
  
+ # moondream3
+ MODEL_ID_MD3 = "moondream/moondream3-preview"
+ model_md3 = AutoModelForCausalLM.from_pretrained(
+     MODEL_ID_MD3,
      trust_remote_code=True,
+     torch_dtype=torch.bfloat16,
+     device_map={"": "cuda"},
+ )
+ tokenizer_md3 = AutoTokenizer.from_pretrained(MODEL_ID_MD3)
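Review note: the Moondream3 branch added in process_document_stream below skips the processor/streamer machinery and calls the checkpoint's trust_remote_code API directly. A standalone sketch of that call path, assuming the encode_image/answer_question methods the commit itself invokes (the input image path is hypothetical):

    # One-shot VQA against moondream3-preview (sketch; mirrors the
    # calls made in process_document_stream).
    from PIL import Image

    img = Image.open("sample_page.png")      # hypothetical input
    enc = model_md3.encode_image(img)        # image -> model-specific encoding
    answer = model_md3.answer_question(
        enc,
        "<image>\nDescribe the document layout.",  # prompt format used below
        tokenizer=tokenizer_md3,
    )
    print(answer)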
+ 
+ # --- PDF Generation and Preview Utility Function ---
+ def generate_and_preview_pdf(image: Image.Image, text_content: str, font_size: int, line_spacing: float, alignment: str, image_size: str):
      """
+     Generates a PDF, saves it, and then creates image previews of its pages.
+     Returns the path to the PDF and a list of paths to the preview images.
      """
+     if image is None or not text_content or not text_content.strip():
+         raise gr.Error("Cannot generate PDF. Image or text content is missing.")
  
+     # --- 1. Generate the PDF ---
+     temp_dir = tempfile.gettempdir()
+     pdf_filename = os.path.join(temp_dir, f"output_{uuid.uuid4()}.pdf")
+     doc = SimpleDocTemplate(
+         pdf_filename,
+         pagesize=A4,
+         rightMargin=inch, leftMargin=inch,
+         topMargin=inch, bottomMargin=inch
+     )
+     styles = getSampleStyleSheet()
+     style_normal = styles["Normal"]
+     style_normal.fontSize = int(font_size)
+     style_normal.leading = int(font_size) * line_spacing
+     style_normal.alignment = {"Left": 0, "Center": 1, "Right": 2, "Justified": 4}[alignment]
  
+     story = []
  
+     img_buffer = BytesIO()
+     image.save(img_buffer, format='PNG')
+     img_buffer.seek(0)
+ 
+     page_width, _ = A4
+     available_width = page_width - 2 * inch
+     image_widths = {
+         "Small": available_width * 0.3,
+         "Medium": available_width * 0.6,
+         "Large": available_width * 0.9,
+     }
+     img_width = image_widths[image_size]
+     img = RLImage(img_buffer, width=img_width, height=image.height * (img_width / image.width))
+     story.append(img)
+     story.append(Spacer(1, 12))
+ 
+     cleaned_text = re.sub(r'#+\s*', '', text_content).replace("*", "")
+     text_paragraphs = cleaned_text.split('\n')
+ 
+     for para in text_paragraphs:
+         if para.strip():
+             story.append(Paragraph(para, style_normal))
+ 
+     doc.build(story)
+ 
+     # --- 2. Render PDF pages as images for preview ---
+     preview_images = []
+     try:
+         pdf_doc = fitz.open(pdf_filename)
+         for page_num in range(len(pdf_doc)):
+             page = pdf_doc.load_page(page_num)
+             pix = page.get_pixmap(dpi=150)
+             preview_img_path = os.path.join(temp_dir, f"preview_{uuid.uuid4()}_p{page_num}.png")
+             pix.save(preview_img_path)
+             preview_images.append(preview_img_path)
+         pdf_doc.close()
+     except Exception as e:
+         print(f"Error generating PDF preview: {e}")
+ 
+     return pdf_filename, preview_images
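Review note: the utility above can be exercised without the UI, which is handy for checking margins and image scaling. A sketch with a synthetic page image (the blank Image.new canvas and the sample text are hypothetical); note that font_size arrives as a string from the gr.Dropdown and is coerced with int() inside the function:

    # Driving generate_and_preview_pdf outside Gradio (sketch).
    from PIL import Image

    canvas = Image.new("RGB", (800, 600), "white")  # stand-in source image
    pdf_path, previews = generate_and_preview_pdf(
        image=canvas,
        text_content="# Heading\nSome extracted text.",  # '#' and '*' are stripped
        font_size="12",        # string, matching the dropdown's choices
        line_spacing=1.15,
        alignment="Justified",
        image_size="Medium",
    )
    print(pdf_path, previews)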
+ 
+ 
+ # --- Core Application Logic ---
  @spaces.GPU
+ def process_document_stream(
+     model_name: str,
+     image: Image.Image,
+     prompt_input: str,
+     max_new_tokens: int,
+     temperature: float,
+     top_p: float,
+     top_k: int,
+     repetition_penalty: float
+ ):
      """
+     Main generator function that handles model inference tasks with advanced generation parameters.
      """
+     if image is None:
+         yield "Please upload an image.", ""
          return
+     if not prompt_input or not prompt_input.strip():
+         yield "Please enter a prompt.", ""
          return
  
+     # --- Special Handling for Moondream3 ---
+     if model_name == "Moondream3":
+         prompt_full = f"<image>\n{prompt_input}"
+         answer = model_md3.answer_question(
+             model_md3.encode_image(image),
+             prompt_full,
+             tokenizer=tokenizer_md3
+         )
+         yield answer, answer
+         return
+ 
+     processor = None
+     model = None
+ 
+     # --- Generic Handling for all other models ---
+     if model_name == "Camel-Doc-OCR-062825": processor, model = processor_m, model_m
+     elif model_name == "MinerU2.5-2509-1.2B": processor, model = processor_t, model_t
+     elif model_name == "Video-MTR": processor, model = processor_s, model_s
+     else:
+         yield "Invalid model selected.", ""
+         return
+ 
+     messages = [{"role": "user", "content": [{"type": "image", "image": image}, {"type": "text", "text": prompt_input}]}]
+     prompt_full = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+     inputs = processor(text=[prompt_full], images=[image], return_tensors="pt", padding=True, truncation=True, max_length=MAX_INPUT_TOKEN_LENGTH).to(device)
+ 
      streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=True)
+ 
      generation_kwargs = {
+         **inputs,
+         "streamer": streamer,
          "max_new_tokens": max_new_tokens,
          "temperature": temperature,
          "top_p": top_p,
          "top_k": top_k,
          "repetition_penalty": repetition_penalty,
+         "do_sample": True
      }
+ 
+     thread = Thread(target=model.generate, kwargs=generation_kwargs)
      thread.start()
+ 
      buffer = ""
      for new_text in streamer:
          buffer += new_text
          buffer = buffer.replace("<|im_end|>", "")
          time.sleep(0.01)
+         yield buffer, buffer
+ 
+     yield buffer, buffer
+ 
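Review note: process_document_stream is a generator yielding cumulative (raw_text, markdown) pairs, so it can be smoke-tested from a plain loop before wiring it into the UI. A sketch assuming a local copy of one of the Space's example images (and a GPU session, given the @spaces.GPU decorator):

    # Consuming the streaming generator directly (sketch).
    from PIL import Image

    img = Image.open("examples/1.png")
    last = ""
    for raw, md in process_document_stream(
        "Camel-Doc-OCR-062825", img, "Convert this page to markdown.",
        max_new_tokens=512, temperature=0.6, top_p=0.9, top_k=50,
        repetition_penalty=1.2,
    ):
        last = raw  # each yield is the full buffer so far, not a delta
    print(last)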
+ # --- Gradio UI Definition ---
+ def create_gradio_interface():
+     """Builds and returns the Gradio web interface."""
+     css = """
+     .main-container { max-width: 1400px; margin: 0 auto; }
+     .process-button { border: none !important; color: white !important; font-weight: bold !important; background-color: blue !important;}
+     .process-button:hover { background-color: darkblue !important; transform: translateY(-2px) !important; box-shadow: 0 4px 8px rgba(0,0,0,0.2) !important; }
+     #gallery { min-height: 400px; }
+     """
+     with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
+         gr.HTML("""
+         <div class="title" style="text-align: center">
+             <h1>Multimodal VLM v1.0</h1>
+             <p style="font-size: 1.1em; color: #6b7280; margin-bottom: 0.6em;">
+                 Multimodal VLM for Image Content Extraction and Understanding
+             </p>
+         </div>
+         """)
+ 
+         with gr.Row():
+             # Left Column (Inputs)
+             with gr.Column(scale=1):
+                 model_choice = gr.Dropdown(
+                     choices=["Moondream3", "Camel-Doc-OCR-062825", "MinerU2.5-2509-1.2B", "Video-MTR"],
+                     label="Select Model", value="Moondream3"
+                 )
+ 
+                 prompt_input = gr.Textbox(label="Query Input", placeholder="✦︎ Enter the prompt")
+                 image_input = gr.Image(label="Upload Image", type="pil", sources=['upload'])
+ 
+                 with gr.Accordion("Advanced Settings (PDF)", open=False):
+                     max_new_tokens = gr.Slider(minimum=512, maximum=8192, value=2048, step=256, label="Max New Tokens")
+                     temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
+                     top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+                     top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
+                     repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
+ 
+                     gr.Markdown("### PDF Export Settings")
+                     font_size = gr.Dropdown(choices=["8", "10", "12", "14", "16", "18"], value="12", label="Font Size")
+                     line_spacing = gr.Dropdown(choices=[1.0, 1.15, 1.5, 2.0], value=1.15, label="Line Spacing")
+                     alignment = gr.Dropdown(choices=["Left", "Center", "Right", "Justified"], value="Justified", label="Text Alignment")
+                     image_size = gr.Dropdown(choices=["Small", "Medium", "Large"], value="Medium", label="Image Size in PDF")
+ 
+                 process_btn = gr.Button("🚀 Process Image", variant="primary", elem_classes=["process-button"], size="lg")
+                 clear_btn = gr.Button("🗑️ Clear All", variant="secondary")
+ 
+             # Right Column (Outputs)
+             with gr.Column(scale=2):
+                 with gr.Tabs() as tabs:
+                     with gr.Tab("📝 Extracted Content"):
+                         raw_output_stream = gr.Textbox(label="Raw Model Output Stream", interactive=False, lines=15, show_copy_button=True)
+                         with gr.Row():
+                             examples = gr.Examples(
+                                 examples=["examples/1.png", "examples/2.png", "examples/3.png",
+                                           "examples/4.png", "examples/5.png"],
+                                 inputs=image_input, label="Examples"
+                             )
+                         gr.Markdown("[Report-Bug💻](https://huggingface.co/spaces/prithivMLmods/Tiny-VLMs-Lab/discussions) | [prithivMLmods🤗](https://huggingface.co/prithivMLmods)")
+ 
+                     with gr.Tab("📰 README.md"):
+                         with gr.Accordion("(Result.md)", open=True):
+                             markdown_output = gr.Markdown()
+ 
+                     with gr.Tab("📋 PDF Preview"):
+                         generate_pdf_btn = gr.Button("📄 Generate PDF & Render", variant="primary")
+                         pdf_output_file = gr.File(label="Download Generated PDF", interactive=False)
+                         pdf_preview_gallery = gr.Gallery(label="PDF Page Preview", show_label=True, elem_id="gallery", columns=2, object_fit="contain", height="auto")
+ 
+         # Event Handlers
+         def clear_all_outputs():
+             return None, "", "Raw output will appear here.", "", None, None
+ 
+         process_btn.click(
+             fn=process_document_stream,
+             inputs=[model_choice, image_input, prompt_input, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
+             outputs=[raw_output_stream, markdown_output]
+         )
+ 
+         generate_pdf_btn.click(
+             fn=generate_and_preview_pdf,
+             inputs=[image_input, raw_output_stream, font_size, line_spacing, alignment, image_size],
+             outputs=[pdf_output_file, pdf_preview_gallery]
+         )
+ 
+         clear_btn.click(
+             clear_all_outputs,
+             outputs=[image_input, prompt_input, raw_output_stream, markdown_output, pdf_output_file, pdf_preview_gallery]
+         )
+     return demo
  
  if __name__ == "__main__":
+     demo = create_gradio_interface()
+     demo.queue(max_size=50).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)
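Review note: the .click wiring above streams because Gradio treats a generator handler as a streaming event; each yield updates the bound output components. A stripped-down sketch of the same mechanism (echo_stream is a hypothetical handler, not part of this Space):

    # Generator handlers stream through .click, as in process_btn.click.
    import time
    import gradio as gr

    def echo_stream(prompt):
        buffer = ""
        for ch in prompt:
            buffer += ch
            time.sleep(0.05)
            yield buffer  # each yield refreshes the output Textbox

    with gr.Blocks() as demo:
        box_in = gr.Textbox(label="Prompt")
        box_out = gr.Textbox(label="Stream")
        gr.Button("Run").click(echo_stream, inputs=box_in, outputs=box_out)

    if __name__ == "__main__":
        demo.queue().launch()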