Update app.py
app.py CHANGED

@@ -140,13 +140,24 @@ def predict(prompt, video_data, temperature, model, tokenizer):
 
     return response
 
+def get_analysis_prompt(step_number):
+    """Constructs the prompt for analyzing manufacturing delays based on the selected step."""
+    return f"""You are an AI expert system specializing in manufacturing processes.
+Your task is to analyze video footage from Step {step_number} of a tire manufacturing process and identify any issues based on the observed footage.
+- Focus on identifying signs of delay or disruption.
+- If no person is visible, it may indicate a staffing issue.
+- If a person is seen modifying the tire, they may be repairing defects or handling material issues.
+- Carefully examine for mechanical failures, material problems, or human involvement.
+
+Provide an analysis of the video by determining the most likely cause of delay in this step, and explain why this conclusion was reached based on the visual evidence."""
+
 def inference(video, step_number):
     """Analyzes video to predict possible issues based on the manufacturing step."""
     try:
         if not video:
             return "Please upload a video first."
 
-        prompt =
+        prompt = get_analysis_prompt(step_number)
         temperature = 0.8
         response = predict(prompt, video, temperature, model, tokenizer)
 
@@ -178,10 +189,7 @@ def create_interface():
         gr.Examples(
             examples=[
                 ["7838_step2_2_eval.mp4", "Step 2"],
-                ["7838_step6_2_eval.mp4", "Step 6"],
-                ["7838_step8_1_eval.mp4", "Step 8"],
-                ["7993_step6_3_eval.mp4", "Step 6"],
-                ["7993_step8_3_eval.mp4", "Step 8"]
+                ["7838_step6_2_eval.mp4", "Step 6"]
             ],
             inputs=[video, step_number],
             cache_examples=False