File size: 2,499 Bytes
d0fef57
 
d561a66
d0fef57
e6ac7d7
 
 
d0fef57
ac65561
 
d0fef57
ac65561
d0fef57
ac65561
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76cb1c1
ac65561
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
76cb1c1
d561a66
ac65561
 
 
 
 
 
 
d561a66
ac65561
95c11a1
ac65561
 
 
 
76cb1c1
ac65561
 
 
 
 
e6ac7d7
b261577
ac65561
 
 
 
 
 
 
 
 
76cb1c1
ac65561
 
 
 
ed9b243
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
import os
import cv2
import gradio as gr
import torch
from basicsr.archs.srvgg_arch import SRVGGNetCompact
from gfpgan.utils import GFPGANer
from realesrgan.utils import RealESRGANer

# Make sure the directory used for saving restored images exists.
os.makedirs("output", exist_ok=True)

# Background (non-face) upsampler: compact SRVGG generator wrapped by RealESRGAN.
model = SRVGGNetCompact(
    num_in_ch=3,
    num_out_ch=3,
    num_feat=64,
    num_conv=32,
    upscale=4,
    act_type='prelu',
)
upsampler = RealESRGANer(
    scale=4,
    model_path='realesr-general-x4v3.pth',
    model=model,
    tile=0,        # 0 = no tiling; process the whole image in one pass
    tile_pad=10,
    pre_pad=0,
    half=torch.cuda.is_available(),  # fp16 inference only when a GPU is present
)

def inference(img_path, version, scale):
    """Restore faces in an image with GFPGAN and upscale the background.

    Args:
        img_path: Path to the input image file.
        version: GFPGAN weight variant: 'v1.2', 'v1.3', 'v1.4', or
            'RestoreFormer'.
        scale: Target overall scale factor. GFPGAN upscales 2x internally;
            any other factor is reached by resizing the result.

    Returns:
        Tuple of (restored RGB image as a numpy array, path of the saved
        JPEG), or (None, None) if the image cannot be read or the version
        is unknown.
    """
    img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)

    if img is None:
        return None, None

    # Normalize to 3-channel BGR: expand grayscale, drop any alpha channel.
    if len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    elif img.shape[2] == 4:
        img = img[:, :, :3]  # Remove alpha

    # Map each supported version to its (weight file, architecture) pair.
    _VERSION_CONFIGS = {
        'v1.2': ('GFPGANv1.2.pth', 'clean'),
        'v1.3': ('GFPGANv1.3.pth', 'clean'),
        'v1.4': ('GFPGANv1.4.pth', 'clean'),
        'RestoreFormer': ('RestoreFormer.pth', 'RestoreFormer'),
    }
    if version not in _VERSION_CONFIGS:
        return None, None
    model_path, arch = _VERSION_CONFIGS[version]

    face_enhancer = GFPGANer(
        model_path=model_path,
        upscale=2,
        arch=arch,
        channel_multiplier=2,
        bg_upsampler=upsampler
    )

    _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)

    # GFPGAN produced a 2x result; resize to hit any other requested factor.
    if scale != 2:
        h, w = output.shape[:2]
        output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=cv2.INTER_LANCZOS4)

    save_path = f"output/restored_{version}.jpg"
    cv2.imwrite(save_path, output)
    # OpenCV works in BGR; Gradio expects RGB for display.
    output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)

    return output, save_path

# Gradio UI: an image plus two controls in, the restored image and a
# downloadable file out.
_input_components = [
    gr.Image(type="filepath", label="Input Image"),
    gr.Radio(
        ['v1.2', 'v1.3', 'v1.4', 'RestoreFormer'],
        label="GFPGAN Version",
        value="v1.4",
    ),
    gr.Slider(1, 4, value=2, label="Rescaling Factor"),
]
_output_components = [
    gr.Image(type="numpy", label="Restored Image"),
    gr.File(label="Download"),
]

demo = gr.Interface(
    fn=inference,
    inputs=_input_components,
    outputs=_output_components,
    title="GFPGAN Face Restoration on Hugging Face",
    description="Restore old or AI-generated faces using GFPGAN.",
)

demo.queue().launch()