Spaces:
Runtime error
import gradio as gr
import torch
from transformers import TableTransformerForObjectDetection
import matplotlib.pyplot as plt
from transformers import DetrFeatureExtractor
import pandas as pd
import uuid
from surya.ocr import run_ocr
# from surya.model.detection.segformer import load_model as load_det_model, load_processor as load_det_processor
from surya.model.detection.model import load_model as load_det_model, load_processor as load_det_processor
from surya.model.recognition.model import load_model as load_rec_model
from surya.model.recognition.processor import load_processor as load_rec_processor
from PIL import ImageDraw, Image
import os
from pdf2image import convert_from_path
import tempfile
from ultralyticsplus import YOLO, render_result
import cv2
import numpy as np
from fpdf import FPDF
def convert_pdf_images(pdf_path):
    # Convert the PDF to one image per page
    images = convert_from_path(pdf_path)
    # Save each page as a temporary image and collect the file paths
    temp_file_paths = []
    for i, page in enumerate(images):
        # Create a temporary file with a unique name
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        page.save(temp_file.name, 'PNG')  # Save the page image to the temporary file
        temp_file_paths.append(temp_file.name)  # Add the file path to the list
    return temp_file_paths[0]  # Return only the first page's file path
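
# The function above converts every page but returns only the first one. A minimal
# sketch of a multi-page variant (a hypothetical helper, not used by the app below)
# would return every temporary path instead:
def convert_pdf_images_all_pages(pdf_path):
    # Convert every page of the PDF and return one temporary PNG path per page
    temp_file_paths = []
    for page in convert_from_path(pdf_path):
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        page.save(temp_file.name, 'PNG')
        temp_file_paths.append(temp_file.name)
    return temp_file_paths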
# Load the YOLO table-detection model
model_yolo = YOLO('keremberke/yolov8m-table-extraction')
# Set model parameters
model_yolo.overrides['conf'] = 0.25  # NMS confidence threshold
model_yolo.overrides['iou'] = 0.45  # NMS IoU threshold
model_yolo.overrides['agnostic_nms'] = False  # NMS class-agnostic
model_yolo.overrides['max_det'] = 1000  # maximum number of detections per image
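
# render_result is imported above but never used. As a quick sanity check
# (a sketch, assuming a local test image named "sample_page.png"), the raw YOLO
# detections can be previewed before any cropping:
# results = model_yolo.predict("sample_page.png")
# preview = render_result(model=model_yolo, image="sample_page.png", result=results[0])
# preview.show()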
# The new v1.1 checkpoints no longer require timm
device = "cuda" if torch.cuda.is_available() else "cpu"
langs = ["en", "th"]  # Replace with your languages - optional but recommended
det_processor, det_model = load_det_processor(), load_det_model()
rec_model, rec_processor = load_rec_model(), load_rec_processor()
feature_extractor = DetrFeatureExtractor()
model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-structure-recognition-v1.1-all")
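
# feature_extractor and the table-transformer model above are loaded but never
# called in the pipeline below. A minimal sketch of table structure recognition
# on a cropped table (table_image is assumed to be a PIL image), using the
# standard transformers object-detection post-processing API:
# encoding = feature_extractor(table_image, return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**encoding)
# target_sizes = torch.tensor([table_image.size[::-1]])
# cells = feature_extractor.post_process_object_detection(
#     outputs, threshold=0.7, target_sizes=target_sizes)[0]
# # cells["boxes"] / cells["labels"] hold detected row, column, and header regions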
def crop_table(filename):
    # Load the page image
    image_path = filename
    image = Image.open(image_path)
    image_np = np.array(image)
    # Perform inference with the YOLO table detector
    results = model_yolo.predict(image_path)
    # Extract the first bounding box (assuming there is only one table)
    bbox = results[0].boxes[0]
    x1, y1, x2, y2 = map(int, bbox.xyxy[0])  # Get the bounding box coordinates
    # Crop the image using the bounding box coordinates
    cropped_image = image_np[y1:y2, x1:x2]
    # PIL already decodes the page as RGB, so no channel conversion is needed
    cropped_image_pil = Image.fromarray(cropped_image)
    # Save the cropped table to a temporary PNG file
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
    cropped_image_pil.save(temp_file.name)
    return temp_file.name
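
# crop_table assumes exactly one table per page (it only uses boxes[0]). A minimal
# sketch of a variant that returns one crop per detected table (a hypothetical
# helper, not wired into the Gradio app below):
def crop_all_tables(filename):
    image_np = np.array(Image.open(filename))
    results = model_yolo.predict(filename)
    crop_paths = []
    for bbox in results[0].boxes:
        x1, y1, x2, y2 = map(int, bbox.xyxy[0])
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        Image.fromarray(image_np[y1:y2, x1:x2]).save(temp_file.name)
        crop_paths.append(temp_file.name)
    return crop_paths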
def extract_table(image_path):
    image = Image.open(image_path)
    predictions = run_ocr([image], [langs], det_model, det_processor, rec_model, rec_processor)
    objs = []
    for t in predictions[0].text_lines:
        objs.append([t.polygon, t.confidence, t.text, t.bbox])
    # Sort objects by their y-coordinate to facilitate row separation
    objs = sorted(objs, key=lambda x: x[3][1])
    # Initialize lists to store rows and column boundaries
    rows = []
    row_threshold = 5  # Adjust as needed to separate rows based on y-coordinates
    column_boundaries = []
    # First pass: determine approximate column boundaries based on x-coordinates
    for obj in objs:
        x_min = obj[3][0]  # x-coordinate of the left side of the bounding box
        if not any(abs(x - x_min) < 10 for x in column_boundaries):
            column_boundaries.append(x_min)
    # Sort column boundaries to ensure proper left-to-right order
    column_boundaries.sort()
    # Second pass: organize the text by rows and columns
    current_row = []
    previous_y = None
    for obj in objs:
        bbox = obj[3]
        text = obj[2]
        # Check whether the current item belongs to a new row based on its y-coordinate
        if previous_y is None or abs(bbox[1] - previous_y) > row_threshold:
            # Add the completed row to the list if it is not empty
            if current_row:
                rows.append(current_row)
            current_row = [''] * len(column_boundaries)  # Initialize a new row with placeholders
        # Find the appropriate column for the current text based on its x-coordinate
        for col_index, x_bound in enumerate(column_boundaries):
            if abs(bbox[0] - x_bound) < 10:  # Adjust threshold as necessary
                current_row[col_index] = text
                break
        previous_y = bbox[1]
    # Add the last row if it is not empty
    if current_row:
        rows.append(current_row)
    # Create a DataFrame from the rows, using the first row as the header
    df = pd.DataFrame(rows)
    df.columns = df.iloc[0]
    df = df.iloc[1:]
    # Save the DataFrame to a CSV file
    csv_path = f'{uuid.uuid4()}.csv'
    df.to_csv(csv_path, index=False)
    # Draw the OCR bounding boxes on the image and save it
    table_with_bbox_path = f"{uuid.uuid4()}.png"
    draw = ImageDraw.Draw(image)
    for obj in objs:
        draw.rectangle(obj[3], outline='red', width=1)
    image.save(table_with_bbox_path)
    return csv_path, table_with_bbox_path
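
# Example usage (a sketch, assuming a cropped table image saved as "table_crop.png"):
# csv_path, annotated_png = extract_table("table_crop.png")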
# Function to process the uploaded file
def process_file(uploaded_file):
    images_table = convert_pdf_images(uploaded_file)
    cropped_table = crop_table(images_table)
    filepath, bbox_table = extract_table(cropped_table)
    os.remove(images_table)
    os.remove(cropped_table)
    return filepath, bbox_table  # Return the CSV path for download and the annotated table image
# Function to clear the inputs and outputs
def clear_inputs():
    return None, None, None  # Clear the upload, download, and image components
# Define the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## Upload a PDF, Process It, and Download the Processed File")
    with gr.Row():
        upload = gr.File(label="Upload PDF", type="filepath", file_types=[".pdf"])
        download = gr.File(label="Download Extracted CSV")
    with gr.Row():
        process_button = gr.Button("Process")
        clear_button = gr.Button("Clear")  # Custom clear button
    image_display = gr.Image(label="Processed Image")
    # Trigger the file processing on button click
    process_button.click(process_file, inputs=upload, outputs=[download, image_display])
    # Clear the inputs and outputs
    clear_button.click(clear_inputs, inputs=None, outputs=[upload, download, image_display])
# Launch the interface
demo.launch()
# print(process_file("/content/ขอ ตารางกริยาช่องที่ 1 ในภาษาไทย (กริยาคำกริยา) ซ... - ขอ ตารางกริยาช่องที่ 1 ในภาษาไทย (กริย.pdf"))