Runtime error

Update app.py
app.py
CHANGED
@@ -32,12 +32,18 @@ if not os.path.exists(app.config['RESULT_FOLDER']):
     os.makedirs(app.config['RESULT_FOLDER'])
 
 # Set the PaddleOCR home directory to a writable location
-#os.environ['PADDLEOCR_HOME'] = os.path.join(app.config['UPLOAD_FOLDER'], '.paddleocr') # Change made here
 os.environ['PADDLEOCR_HOME'] = '/tmp/.paddleocr'
 
+# Check if PaddleOCR home directory is writable
+if not os.path.exists('/tmp/.paddleocr'):
+    os.makedirs('/tmp/.paddleocr', exist_ok=True)
+    logging.info("Created PaddleOCR home directory.")
+else:
+    logging.info("PaddleOCR home directory exists.")
+
 @app.route('/')
 def index():
-    uploaded_files = session.get('uploaded_files', [])
+    uploaded_files = session.get('uploaded_files', [])
     logging.info(f"Accessed index page, uploaded files: {uploaded_files}")
     return render_template('index.html', uploaded_files=uploaded_files)
 
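A side note on the directory check added here: the new comment says "writable", but os.path.exists only confirms the directory exists, and on Spaces an existing path can still be read-only. Below is a minimal sketch of an explicit writability probe, assuming the same /tmp/.paddleocr path; the ensure_writable helper is illustrative and not part of the commit.

import logging
import os
import tempfile

PADDLEOCR_HOME = '/tmp/.paddleocr'  # same path the commit exports; adjust if the app changes it

def ensure_writable(path: str) -> bool:
    """Create the directory if needed and confirm we can actually write into it."""
    os.makedirs(path, exist_ok=True)
    try:
        # Creating (and auto-deleting) a temp file proves write permission, not just existence.
        with tempfile.NamedTemporaryFile(dir=path):
            return True
    except OSError as exc:
        logging.warning(f"{path} is not writable: {exc}")
        return False

os.environ['PADDLEOCR_HOME'] = PADDLEOCR_HOME  # kept exactly as in the commit above
if not ensure_writable(PADDLEOCR_HOME):
    logging.error("PaddleOCR cache directory is not writable; model downloads will likely fail.")

Whether the installed paddleocr version actually honors PADDLEOCR_HOME is a separate question; the environment variable assignment is left exactly as the commit writes it.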
@@ -48,38 +54,38 @@ def upload_file():
         logging.warning("No file part found in the request")
         return redirect(request.url)
 
-    files = request.files.getlist('files')
+    files = request.files.getlist('files')
     if not files or all(file.filename == '' for file in files):
         flash('No selected files')
         logging.warning("No files selected for upload")
         return redirect(request.url)
 
-    uploaded_files = session.get('uploaded_files', [])
+    uploaded_files = session.get('uploaded_files', [])
     for file in files:
         if file:
             filename = file.filename
             file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
             file.save(file_path)
-            uploaded_files.append(filename)
-            logging.info(f"Uploaded file: {filename}")
+            uploaded_files.append(filename)
+            logging.info(f"Uploaded file: {filename} at {file_path}")
 
-    session['uploaded_files'] = uploaded_files
+    session['uploaded_files'] = uploaded_files
    flash('Files successfully uploaded')
     logging.info(f"Files successfully uploaded: {uploaded_files}")
     return redirect(url_for('index'))
 
 @app.route('/remove_file')
 def remove_file():
-    uploaded_files = session.get('uploaded_files', [])
+    uploaded_files = session.get('uploaded_files', [])
     for filename in uploaded_files:
         file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
-        if os.path.exists(file_path):
+        if os.path.exists(file_path):
             os.remove(file_path)
             logging.info(f"Removed file: {filename}")
         else:
-            logging.warning(f"File not found for removal: {filename}")
+            logging.warning(f"File not found for removal: {file_path}")  # More specific log
 
-    session.pop('uploaded_files', None)
+    session.pop('uploaded_files', None)
     flash('Files successfully removed')
     logging.info("All uploaded files removed")
     return redirect(url_for('index'))
 
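One thing this hunk leaves untouched: upload_file joins the client-supplied file.filename straight into UPLOAD_FOLDER before file.save, so a crafted name could point outside the upload directory. The sketch below shows the usual Werkzeug guard; secure_filename is a real helper shipped with Flask's Werkzeug dependency, while save_upload and its arguments are illustrative names, not code from this app.

import os
from werkzeug.utils import secure_filename  # sanitizes user-supplied filenames

def save_upload(file, upload_folder):
    """Illustrative helper: sanitize the filename before writing the upload to disk."""
    filename = secure_filename(file.filename)
    if not filename:                      # secure_filename() can return '' for hostile names
        return None
    file_path = os.path.join(upload_folder, filename)
    file.save(file_path)                  # werkzeug FileStorage.save() writes the stream out
    return filename

Inside the route this would stand in for the filename = file.filename / file.save(file_path) pair, with the returned name appended to uploaded_files.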
@@ -92,20 +98,17 @@ def process_file():
         logging.warning("No files selected for processing")
         return redirect(url_for('index'))
 
-    # Create a list of file paths for the extracted text function
     file_paths = [os.path.join(app.config['UPLOAD_FOLDER'], filename) for filename in uploaded_files]
     logging.info(f"Processing files: {file_paths}")
 
-    extracted_text = {}
-    processed_Img = {}
+    extracted_text = {}
+    processed_Img = {}
 
     try:
-        # Extract text from all images
         extracted_text, processed_Img = extract_text_from_images(file_paths, RESULT_FOLDER)
         logging.info(f"Extracted text: {extracted_text}")
         logging.info(f"Processed images: {processed_Img}")
 
-        # Call the Gemma model API and get the professional data
         llmText = json_to_llm_str(extracted_text)
         logging.info(f"LLM text: {llmText}")
 
@@ -116,7 +119,7 @@ def process_file():
         logging.error(f"Error during LLM processing: {e}")
         logging.info("Running backup model...")
 
-        LLMdata = {}
+        LLMdata = {}
 
         if extracted_text:
             text = json_to_llm_str(extracted_text)
 
@@ -125,25 +128,22 @@ def process_file():
         else:
             logging.warning("No extracted text available for backup model")
 
-    # Processing results
     cont_data = process_extracted_text(extracted_text)
     logging.info(f"Contextual data: {cont_data}")
-
+
-    # Storing the parsed results
     processed_data = process_resume_data(LLMdata, cont_data, extracted_text)
     logging.info(f"Processed data: {processed_data}")
 
-    # Store processed data and images in session
     session['processed_data'] = processed_data
-    session['processed_Img'] = processed_Img
+    session['processed_Img'] = processed_Img
     flash('Data processed and analyzed successfully')
     logging.info("Data processed and analyzed successfully")
     return redirect(url_for('result'))
 
 @app.route('/result')
 def result():
-    processed_data = session.get('processed_data', {})
-    processed_Img = session.get('processed_Img', {})
+    processed_data = session.get('processed_data', {})
+    processed_Img = session.get('processed_Img', {})
     logging.info(f"Displaying results: Data - {processed_data}, Images - {processed_Img}")
     return render_template('result.html', data=processed_data, Img=processed_Img)
 
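A caveat on the session writes in the last two hunks: Flask's default session is a client-side cookie limited to roughly 4 KB, so storing full OCR output in session['processed_data'] and session['processed_Img'] can overflow it and silently drop data. Below is a minimal sketch of keeping only a short token in the session and the payload on disk under RESULT_FOLDER; store_result and load_result are illustrative names, not functions from this app.

import json
import os
import uuid

def store_result(result_folder, data):
    """Illustrative: persist processed data to disk and return a short token for the session."""
    token = uuid.uuid4().hex
    with open(os.path.join(result_folder, f"{token}.json"), "w") as fh:
        json.dump(data, fh)
    return token

def load_result(result_folder, token):
    """Illustrative: load previously stored data, or {} if the token is unknown."""
    path = os.path.join(result_folder, f"{token}.json")
    if not os.path.exists(path):
        return {}
    with open(path) as fh:
        return json.load(fh)

# In process_file():  session['result_token'] = store_result(RESULT_FOLDER, processed_data)
# In result():        processed_data = load_result(RESULT_FOLDER, session.get('result_token', ''))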