refactor: update import paths for forensics utilities and enhance logging functionality in inference data
Files changed:

- app.backup.py +5 -5
- app_mcp.py +6 -6
- app_test.py +34 -7
- forensics/__init__.py +15 -0
- {utils → forensics}/bitplane.py +0 -0
- {utils → forensics}/ela.py +1 -1
- {utils → forensics}/exif.py +0 -0
- {utils → forensics}/gradient.py +1 -1
- {utils → forensics}/minmax.py +1 -1
- {utils → forensics}/wavelet.py +0 -0
- requirements.txt +32 -16
- utils/hf_logger.py +17 -1
- {forensics → utils}/registry.py +0 -0

Note that the forensic helpers move out of utils/ into the new forensics package, while registry.py moves the opposite way, from forensics/ into utils/.
app.backup.py CHANGED

@@ -13,10 +13,10 @@ import numpy as np
 import io
 import logging
 from utils.utils import softmax, augment_image, convert_pil_to_bytes
-from utils.gradient import gradient_processing
-from utils.minmax import minmax_process
-from utils.ela import ELA
-from utils.wavelet import wavelet_blocking_noise_estimation
+from forensics.gradient import gradient_processing
+from forensics.minmax import minmax_process
+from forensics.ela import ELA
+from forensics.wavelet import wavelet_blocking_noise_estimation
 
 # Configure logging
 logging.basicConfig(level=logging.INFO)

@@ -276,7 +276,7 @@ def predict_image_with_html(img, confidence_threshold, augment_methods, rotate_d
     img_np_og = np.array(img)  # Convert PIL Image to NumPy array
 
     gradient_image = gradient_processing(img_np)  # Added gradient processing
-    minmax_image = minmax_process(img_np)
+    minmax_image = minmax_process(img_np)  # Added MinMax processing
 
     # First pass - standard analysis
     ela1 = ELA(img_np_og, quality=75, scale=50, contrast=20, linear=False, grayscale=True)
app_mcp.py CHANGED

@@ -13,17 +13,17 @@ import numpy as np
 import io
 import logging
 from utils.utils import softmax, augment_image, convert_pil_to_bytes
-from utils.gradient import gradient_processing
-from utils.minmax import minmax_process
-from utils.ela import ELA
-from utils.wavelet import wavelet_blocking_noise_estimation
-from utils.bitplane import bit_plane_extractor
+from forensics.gradient import gradient_processing
+from forensics.minmax import minmax_process
+from forensics.ela import ELA
+from forensics.wavelet import wavelet_blocking_noise_estimation
+from forensics.bitplane import bit_plane_extractor
 from utils.hf_logger import log_inference_data
 from utils.text_content import QUICK_INTRO, IMPLEMENTATION
 from agents.ensemble_team import EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent
 from agents.smart_agents import ContextualIntelligenceAgent, ForensicAnomalyDetectionAgent
 
-from forensics.registry import register_model, MODEL_REGISTRY, ModelEntry
+from utils.registry import register_model, MODEL_REGISTRY, ModelEntry
 from agents.ensemble_weights import ModelWeightManager
 from dotenv import load_dotenv
 import json
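registry.py is only moved by this commit (see the rename at the bottom), so its contents never appear in the diff. As a reading aid, here is a minimal, hypothetical sketch of the registry pattern the import line above implies: only the three names register_model, MODEL_REGISTRY, and ModelEntry are confirmed; the fields and bodies are assumptions.

```python
# Hypothetical sketch of utils/registry.py; only the exported names are
# confirmed by the import in app_mcp.py. Fields and bodies are assumptions.
from dataclasses import dataclass
from typing import Callable, Dict, Optional

@dataclass
class ModelEntry:
    model: object                        # loaded model or inference pipeline
    preprocess: Callable                 # app_test.py calls entry.preprocess(image)
    postprocess: Optional[Callable] = None
    display_name: str = ""

MODEL_REGISTRY: Dict[str, ModelEntry] = {}

def register_model(model_id: str, entry: ModelEntry) -> None:
    """Register a model entry under a string ID for later ensemble lookup."""
    MODEL_REGISTRY[model_id] = entry
```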
app_test.py CHANGED

@@ -9,15 +9,15 @@ import logging
 # Assuming these are available from your utils and agents directories
 # You might need to adjust paths or copy these functions/classes if they are not directly importable.
 from utils.utils import softmax, augment_image
-from utils.gradient import gradient_processing
-from utils.minmax import minmax_process
-from utils.ela import ELA
-from utils.wavelet import wavelet_blocking_noise_estimation
-from utils.bitplane import bit_plane_extractor
+from forensics.gradient import gradient_processing
+from forensics.minmax import minmax_process
+from forensics.ela import ELA
+from forensics.wavelet import wavelet_blocking_noise_estimation
+from forensics.bitplane import bit_plane_extractor
 from utils.hf_logger import log_inference_data
 from agents.ensemble_team import EnsembleMonitorAgent, WeightOptimizationAgent, SystemHealthAgent
 from agents.smart_agents import ContextualIntelligenceAgent, ForensicAnomalyDetectionAgent
-from forensics.registry import register_model, MODEL_REGISTRY, ModelEntry
+from utils.registry import register_model, MODEL_REGISTRY, ModelEntry
 from agents.ensemble_weights import ModelWeightManager
 from transformers import pipeline, AutoImageProcessor, SwinForImageClassification, Swinv2ForImageClassification, AutoFeatureExtractor, AutoModelForImageClassification
 from torchvision import transforms

@@ -32,7 +32,7 @@ logger = logging.getLogger(__name__)
 os.environ['HF_HUB_CACHE'] = './models'
 
 LOCAL_LOG_DIR = "./hf_inference_logs"
-HF_DATASET_NAME="degentic_rd0"
+HF_DATASET_NAME="aiwithoutborders-xyz/degentic_rd0"
 load_dotenv()
 
 # Custom JSON Encoder to handle numpy types

@@ -166,7 +166,18 @@ register_model_with_metadata(
     display_name="ViT", contributor="temp", model_path=MODEL_PATHS["model_7"]
 )
 
+
 def infer(image: Image.Image, model_id: str, confidence_threshold: float = 0.75) -> dict:
+    """Predict using a specific model.
+
+    Args:
+        image (Image.Image): The input image to classify.
+        model_id (str): The ID of the model to use for classification.
+        confidence_threshold (float, optional): The confidence threshold for classification. Defaults to 0.75.
+
+    Returns:
+        dict: A dictionary containing the model details, classification scores, and label.
+    """
     entry = MODEL_REGISTRY[model_id]
     img = entry.preprocess(image)
     try:

@@ -194,6 +205,22 @@ def infer(image: Image.Image, model_id: str, confidence_threshold: float = 0.75)
     }
 
 def predict_with_ensemble(img, confidence_threshold, augment_methods, rotate_degrees, noise_level, sharpen_strength):
+    """Full ensemble prediction pipeline.
+
+    Args:
+        img (Image.Image): The input image to classify.
+        confidence_threshold (float): The confidence threshold for classification.
+        augment_methods (list): The augmentation methods to apply to the image.
+        rotate_degrees (int): The degrees to rotate the image.
+        noise_level (int): The noise level to add to the image.
+        sharpen_strength (int): The strength of the sharpening to apply to the image.
+
+    Raises:
+        ValueError: If the input image could not be converted to a PIL Image.
+
+    Returns:
+        tuple: A tuple containing the processed image, forensic images, model predictions, raw model results, and consensus.
+    """
     if not isinstance(img, Image.Image):
         try:
             img = Image.fromarray(img)
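Given the docstring added above, a call to infer() might look like the following; the model ID mirrors the MODEL_PATHS keys visible in the diff, but the exact registry IDs and result keys are assumptions.

```python
# Hypothetical usage of infer(); "model_7" echoes MODEL_PATHS["model_7"] above,
# but the registered IDs are not confirmed by this diff.
from PIL import Image

image = Image.open("sample.jpg").convert("RGB")
result = infer(image, model_id="model_7", confidence_threshold=0.75)
print(result)  # per the docstring: model details, classification scores, label
```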
forensics/__init__.py ADDED

@@ -0,0 +1,15 @@
+from .bitplane import bit_plane_extractor
+from .ela import ELA
+from .exif import exif_full_dump
+from .gradient import gradient_processing
+from .minmax import minmax_process
+from .wavelet import wavelet_blocking_noise_estimation
+
+__all__ = [
+    'bit_plane_extractor',
+    'ELA',
+    'exif_full_dump',
+    'gradient_processing',
+    'minmax_process',
+    'wavelet_blocking_noise_estimation'
+]
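With the package __init__ in place, call sites can import the forensic helpers from the package root rather than per-module paths. A usage sketch; the ELA arguments are copied from the call in app.backup.py above, and the file name is illustrative.

```python
import numpy as np
from PIL import Image

from forensics import ELA, gradient_processing, minmax_process

img = Image.open("sample.jpg").convert("RGB")  # illustrative input
img_np = np.array(img)

ela_img = ELA(img_np, quality=75, scale=50, contrast=20, linear=False, grayscale=True)
gradient_img = gradient_processing(img_np)
minmax_img = minmax_process(img_np)
```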
{utils → forensics}/bitplane.py RENAMED
File without changes
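bitplane.py moves without changes, so bit_plane_extractor's body is not visible in this diff. For orientation, a minimal sketch of the general bit-plane extraction technique it is named after (not the repo's implementation):

```python
import numpy as np
from PIL import Image

def bit_plane_sketch(gray: np.ndarray, plane: int = 0) -> Image.Image:
    """Isolate one bit of each 8-bit pixel and stretch it to full contrast."""
    bit = (gray >> plane) & 1              # 0 or 1 per pixel
    return Image.fromarray((bit * 255).astype(np.uint8))
```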
{utils → forensics}/ela.py RENAMED

@@ -61,4 +61,4 @@ def ELA(img, quality=75, scale=50, contrast=20, linear=False, grayscale=False):
     if grayscale:
         ela = desaturate(ela)
 
-    return Image.fromarray(ela)
+    return Image.fromarray(ela)

(The removed and added lines are textually identical; the change is presumably whitespace-only, such as a trailing newline. The same applies to gradient.py and minmax.py below.)
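Only ELA's final return line is touched, so the body stays hidden. As context, a minimal sketch of the Error Level Analysis idea (recompress as JPEG, difference against the original, amplify the residual); the parameter names follow the signature in the hunk header, the body is an assumption.

```python
import io

import numpy as np
from PIL import Image, ImageChops

def ela_sketch(img: Image.Image, quality: int = 75, scale: int = 50) -> Image.Image:
    buf = io.BytesIO()
    img.convert("RGB").save(buf, "JPEG", quality=quality)  # recompress
    buf.seek(0)
    diff = ImageChops.difference(img.convert("RGB"), Image.open(buf))
    amplified = np.asarray(diff, dtype=np.float32) * (scale / 10.0)
    return Image.fromarray(np.clip(amplified, 0, 255).astype(np.uint8))
```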
{utils → forensics}/exif.py RENAMED
File without changes
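exif.py is likewise moved verbatim. Its exif_full_dump export (re-exported by forensics/__init__.py above) presumably wraps pyexiftool, which requirements.txt pins below; a sketch under that assumption, requiring the exiftool binary on PATH.

```python
import exiftool  # pyexiftool; assumes the exiftool binary is installed

def exif_dump_sketch(path: str) -> dict:
    with exiftool.ExifToolHelper() as et:
        return et.get_metadata(path)[0]  # one tag -> value dict per input file
```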
{utils → forensics}/gradient.py RENAMED

@@ -47,4 +47,4 @@ def gradient_processing(image, intensity=90, blue_mode="Abs", invert=False, equa
         gradient = equalize_img(gradient)
     elif intensity > 0:
         gradient = cv.LUT(gradient, create_lut(intensity, intensity))
-    return Image.fromarray(gradient)
+    return Image.fromarray(gradient)
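The gradient.py hunk exposes the signature gradient_processing(image, intensity=90, blue_mode="Abs", invert=False, equa…) (the last parameter is truncated in the header, presumably equalize) and an OpenCV LUT step, but not the core. A sketch of the underlying gradient-map technique (Sobel magnitude scaled to 8-bit), not the repo's exact code:

```python
import cv2 as cv
import numpy as np
from PIL import Image

def gradient_sketch(image: np.ndarray) -> Image.Image:
    gray = cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    gx = cv.Sobel(gray, cv.CV_32F, 1, 0)      # horizontal derivative
    gy = cv.Sobel(gray, cv.CV_32F, 0, 1)      # vertical derivative
    magnitude = cv.magnitude(gx, gy)
    scaled = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
    return Image.fromarray(scaled.astype(np.uint8))
```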
{utils → forensics}/minmax.py RENAMED

@@ -91,4 +91,4 @@ def minmax_process(image, channel=4, radius=2):
     elif channel == 3:
         minmax[low] = [255, 255, 255]
         minmax[high] = [255, 255, 255]
-    return Image.fromarray(minmax)
+    return Image.fromarray(minmax)
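The visible tail of minmax_process paints "low" and "high" pixels white depending on the channel mode. A sketch of the usual min/max-deviation detector behind that kind of output (flagging pixels equal to their local minimum or maximum); the radius semantics are assumed.

```python
import cv2 as cv
import numpy as np

def minmax_masks_sketch(gray: np.ndarray, radius: int = 2):
    kernel = np.ones((2 * radius + 1, 2 * radius + 1), np.uint8)
    low = gray == cv.erode(gray, kernel)    # pixel equals its local minimum
    high = gray == cv.dilate(gray, kernel)  # pixel equals its local maximum
    return low, high
```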
{utils → forensics}/wavelet.py RENAMED
File without changes
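wavelet.py also moves verbatim, so wavelet_blocking_noise_estimation is not shown. For reference, the classic wavelet noise estimate that PyWavelets (pinned in requirements.txt below) makes straightforward is the median absolute deviation of the finest diagonal subband; this is a sketch of the general technique only.

```python
import numpy as np
import pywt

def noise_sigma_sketch(gray: np.ndarray) -> float:
    # Donoho's robust estimator: sigma ~ median(|HH|) / 0.6745
    _, (_, _, diagonal) = pywt.dwt2(gray.astype(np.float32), "db8")
    return float(np.median(np.abs(diagonal)) / 0.6745)
```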
requirements.txt CHANGED

@@ -1,20 +1,36 @@
-
-
-
-transformers
-
+--index-url https://download.pytorch.org/whl/nightly/cpu
+
+# Core ML/AI libraries
+transformers>=4.48.2
+torch
 torchvision
-
-
-# pillow
+torchaudio
+# Image processing
 opencv-python
-modelscope_studio
-pydantic==2.10.6
-tf-keras
-PyWavelets
-pyexiftool
-psutil
-datasets
 Pillow
+
+# Wavelet processing
+PyWavelets==1.8.0
+
+# System utilities
+psutil
 python-dotenv
-
+
+# Gradio and UI
+gradio[mcp]>=5.33.1
+# gradio_leaderboard==0.0.13
+gradio_client==1.10.3
+spaces
+
+# HuggingFace ecosystem
+huggingface_hub[hf_xet]>=0.32.0
+datasets>=3.6.0
+
+# Data validation and utilities
+pydantic==2.11.5
+
+# AI agents
+smolagents[all]
+
+# Optional: EXIF metadata (if needed)
+pyexiftool
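One caution on the rewritten file: a bare --index-url on the first line redirects pip's resolution for every requirement to the PyTorch nightly CPU index, not just torch/torchvision/torchaudio; --extra-index-url is the additive variant. A quick sanity check that the pinned stack imported correctly (the names below are import names, which differ from some package names):

```python
# Sanity-check imports and versions for the stack pinned in requirements.txt.
import importlib

for name in ("torch", "torchvision", "transformers", "cv2", "PIL",
             "pywt", "psutil", "gradio", "datasets", "pydantic"):
    try:
        module = importlib.import_module(name)
        print(name, getattr(module, "__version__", "unknown"))
    except ImportError as exc:
        print(name, "MISSING:", exc)
```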
utils/hf_logger.py CHANGED

@@ -5,7 +5,7 @@ import io
 import datetime
 from PIL import Image
 import logging
-from huggingface_hub import HfApi, CommitOperationAdd
+from huggingface_hub import HfApi, CommitScheduler  # Keep HfApi for repo creation, but remove CommitOperationAdd for direct upload
 import numpy as np
 
 logger = logging.getLogger(__name__)

@@ -92,6 +92,22 @@ def log_inference_data(
         with open(log_file_path, 'w', encoding='utf-8') as f:
             json.dump(new_entry, f, cls=NumpyEncoder, indent=2)
 
+        # Schedule commit to Hugging Face dataset repository
+        scheduler = CommitScheduler(
+            repo_id=HF_DATASET_NAME,
+            repo_type="dataset",
+            folder_path=LOCAL_LOG_DIR,
+            path_in_repo="logs",
+            token=os.getenv("HF_TOKEN"),
+            every=10  # Commit every 10 files
+        )
+
+        # Add the file to the scheduler
+        scheduler.push_to_hub(
+            path_or_fileobj=log_file_path,
+            path_in_repo=f"logs/log_{timestamp_str}.json"
+        )
+
         logger.info(f"Inference data logged successfully to local file: {log_file_path}")
 
     except Exception as e:
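For comparison, recent huggingface_hub releases document CommitScheduler as a long-lived object: every is a period in minutes, the scheduler uploads folder_path from a background thread, and push_to_hub() is the no-argument hook that the thread calls (it can be overridden, but per the docs it does not take per-file arguments). A sketch of that documented usage, with the repo ID taken from app_test.py above:

```python
import os

from huggingface_hub import CommitScheduler

# Created once at module import; JSON logs written into the folder are then
# committed automatically by the background thread.
scheduler = CommitScheduler(
    repo_id="aiwithoutborders-xyz/degentic_rd0",
    repo_type="dataset",
    folder_path="./hf_inference_logs",
    path_in_repo="logs",
    token=os.getenv("HF_TOKEN"),
    every=10,  # minutes between background commits
)

scheduler.trigger()  # optional: force an immediate commit (returns a Future)
```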
{forensics → utils}/registry.py RENAMED
File without changes