import os

from ocr.inference import OCRInference

# Resolve the OCR model path as an absolute path so it works regardless of the
# current working directory. ocr/inference.py refers to the model as
# "./ocr_model_output/checkpoint-441"; relative to draft_computation_app the
# checkpoint lives at "../ocr/ocr_model_output/checkpoint-441".
# Directory of the current script (ocr_placeholder.py)
current_script_dir = os.path.dirname(os.path.abspath(__file__))
# Project root (the draft_computation directory, one level up)
project_root = os.path.abspath(os.path.join(current_script_dir, ".."))
# Absolute path to the OCR model checkpoint
OCR_MODEL_PATH = os.path.join(project_root, "ocr", "ocr_model_output", "checkpoint-441")

print(f"OCR Model Path: {OCR_MODEL_PATH}")
# Initialize the OCRInference engine once at import time. For simplicity it is
# created eagerly here; consider lazy loading or a proper singleton pattern for
# production (an illustrative sketch is given at the end of this file).
ocr_engine = OCRInference(model_path=OCR_MODEL_PATH)

def perform_ocr(image_input):
    """
    Perform OCR using the integrated OCRInference engine.

    Args:
        image_input: Path to the image file or a NumPy array representing the image.

    Returns:
        The predicted text from the image.
    """
    return ocr_engine.perform_inference(image_input)
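
# Illustrative sketch only (not wired into perform_ocr above): the lazy /
# singleton-style initialization mentioned earlier, assuming the same
# OCRInference API. It defers the model load until the first call instead of
# paying the cost at import time.
_lazy_ocr_engine = None

def get_ocr_engine():
    """Return a shared OCRInference instance, creating it lazily on first use."""
    global _lazy_ocr_engine
    if _lazy_ocr_engine is None:
        _lazy_ocr_engine = OCRInference(model_path=OCR_MODEL_PATH)
    return _lazy_ocr_engine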