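"""Gradio demo: inpainting with FLUX.1 Fill [dev] combined with the Super Realism and Super Portrait v2 LoRAs."""
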
import random

import numpy as np
import torch
import spaces
import gradio as gr
from diffusers import FluxFillPipeline

MAX_SEED = np.iinfo(np.int32).max

pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16)
lora_repo = "strangerzonehf/Flux-Super-Realism-LoRA"
trigger_word = "Super Realism"

lora_repo_2 = "strangerzonehf/Flux-SuperPortrait-v2-LoRA"
trigger_word_2 = "Super Portrait v2"

# Give each LoRA an explicit adapter name so both can be kept active together.
pipe.load_lora_weights(lora_repo, adapter_name="super_realism")
pipe.load_lora_weights(lora_repo_2, adapter_name="super_portrait_v2")
pipe.set_adapters(["super_realism", "super_portrait_v2"], adapter_weights=[1.0, 1.0])
pipe.to("cuda")

# reference https://huggingface.co/spaces/black-forest-labs/FLUX.1-Fill-dev/blob/main/app.py 
def calculate_optimal_dimensions(image):
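    """Pick output dimensions near 1024 px: snap to multiples of 8 and clamp the aspect ratio to 9:16-16:9."""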
    # Extract the original dimensions
    original_width, original_height = image.size
    
    # Set constants
    MIN_ASPECT_RATIO = 9 / 16
    MAX_ASPECT_RATIO = 16 / 9
    FIXED_DIMENSION = 1024

    # Calculate the aspect ratio of the original image
    original_aspect_ratio = original_width / original_height

    # Determine which dimension to fix
    if original_aspect_ratio > 1:  # Wider than tall
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / original_aspect_ratio)
    else:  # Taller than wide
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * original_aspect_ratio)

    # Ensure dimensions are multiples of 8
    width = (width // 8) * 8
    height = (height // 8) * 8

    # Enforce aspect ratio limits
    calculated_aspect_ratio = width / height
    if calculated_aspect_ratio > MAX_ASPECT_RATIO:
        width = int(height * MAX_ASPECT_RATIO) // 8 * 8
    elif calculated_aspect_ratio < MIN_ASPECT_RATIO:
        height = int(width / MIN_ASPECT_RATIO) // 8 * 8

    # Ensure neither dimension drops below the 576 px minimum (576 is already a multiple of 8)
    width = max(width, 576)
    height = max(height, 576)

    return width, height


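# On ZeroGPU Spaces, @spaces.GPU allocates a GPU per call (here for up to 30 s).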
@spaces.GPU(duration=30)
def inpaint(
    image,
    mask,
    prompt="",
    seed=0,
    num_inference_steps=28,
    guidance_scale=50,
):
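    """Inpaint the masked region of `image`, returning the result image, the final prompt, and the seed used."""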
    image = image.convert("RGB")
    mask = mask.convert("L")
    width, height = calculate_optimal_dimensions(image)

    final_prompt = ""
    if trigger_word:
        prompt = f"{trigger_word}\n\n"
    if trigger_word_2:
        prompt = f"{trigger_word_2}\n\n"

    final_prompt += prompt
        
    # A non-positive seed (the slider default of 0) means "pick a random seed"
    seed = int(seed) if seed else 0
    if seed <= 0:
        seed = random.randint(0, MAX_SEED)
        
    result = pipe(
        image=image,
        mask_image=mask,
        prompt=final_prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        generator=torch.Generator().manual_seed(seed),
    ).images[0]

    result = result.convert("RGBA")

    return result, final_prompt, seed

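# Minimal Gradio UI: image + mask + prompt in, inpainted image (plus prompt and seed) out.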
demo = gr.Interface(
    fn=inpaint,
    inputs=[
        gr.Image(label="image", type="pil"),
        gr.Image(label="mask", type="pil"),
        gr.Text(label="prompt", lines=4),
        gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            info="(0 = Random)"
        ),
        gr.Number(value=40, label="num_inference_steps"),
        gr.Number(value=28, label="guidance_scale"),
    ],
    outputs=[
        gr.Image(label="Result"),
        gr.Text(label="Prompt used", lines=4),
        gr.Number(label="Seed")
    ],
    api_name="inpaint",
    examples=[["./assets/rocket.png", "./assets/Inpainting mask.png"]],
    cache_examples=False
)

demo.launch()