import os
import re
from typing import List, Optional, Tuple, Union

import click
import gradio as gr
import numpy as np
import PIL.Image
import torch

from networks_fastgan import MyGenerator

# NOTE: parse_range, parse_vec2, and make_transform are referenced below but were
# not defined in this file. The minimal versions here are adapted from the
# StyleGAN3 gen_images.py utilities that this script appears to be based on.

def parse_range(s: Union[str, List[int]]) -> List[int]:
    """Parse a comma-separated list of numbers or ranges (e.g. '0,1,4-6') into ints."""
    if isinstance(s, list):
        return s
    ranges = []
    range_re = re.compile(r'^(\d+)-(\d+)$')
    for p in s.split(','):
        m = range_re.match(p)
        if m:
            ranges.extend(range(int(m.group(1)), int(m.group(2)) + 1))
        else:
            ranges.append(int(p))
    return ranges


def parse_vec2(s: Union[str, Tuple[float, float]]) -> Tuple[float, float]:
    """Parse a floating-point 2-vector of syntax 'a,b'."""
    if isinstance(s, tuple):
        return s
    parts = s.split(',')
    if len(parts) == 2:
        return (float(parts[0]), float(parts[1]))
    raise ValueError(f'cannot parse 2-vector {s}')


def make_transform(translate: Tuple[float, float], angle: float) -> np.ndarray:
    """Build a 3x3 rotation/translation matrix for an angle given in degrees."""
    m = np.eye(3)
    s = np.sin(angle / 360.0 * np.pi * 2)
    c = np.cos(angle / 360.0 * np.pi * 2)
    m[0][0] = c
    m[0][1] = s
    m[0][2] = translate[0]
    m[1][0] = -s
    m[1][1] = c
    m[1][2] = translate[1]
    return m


@click.command()
@click.option('--seeds', type=parse_range, help='List of random seeds (e.g., \'0,1,4-6\')', default='10-15', show_default=True)
@click.option('--trunc', 'truncation_psi', type=float, help='Truncation psi', default=1, show_default=True)
@click.option('--class', 'class_idx', type=int, help='Class label (unconditional if not specified)')
@click.option('--noise-mode', help='Noise mode', type=click.Choice(['const', 'random', 'none']), default='const', show_default=True)
@click.option('--translate', help='Translate XY-coordinate (e.g. \'0.3,1\')', type=parse_vec2, default='0,0', show_default=True, metavar='VEC2')
@click.option('--rotate', help='Rotation angle in degrees', type=float, default=0, show_default=True, metavar='ANGLE')
@click.option('--outdir', help='Where to save the output images', type=str, required=True, metavar='DIR')
def generate_images(
    seeds: List[int],
    truncation_psi: float,
    noise_mode: str,
    outdir: str,
    translate: Tuple[float, float],
    rotate: float,
    class_idx: Optional[int],
):
    """Generate images using a pretrained Projected GAN generator loaded from the
    Hugging Face Hub.

    Examples:

    \b
    # Generate six impressionist paintings into out/.
    python gen_images.py --outdir=out --trunc=1 --seeds=10-15
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    G = MyGenerator.from_pretrained("Cropinky/projected_gan_impressionism").to(device)
    print("network loaded")
    os.makedirs(outdir, exist_ok=True)

    # Labels.
    label = torch.zeros([1, G.c_dim], device=device)
    if G.c_dim != 0:
        if class_idx is None:
            raise click.ClickException('Must specify class label with --class when using a conditional network')
        label[:, class_idx] = 1
    elif class_idx is not None:
        print('warn: --class=lbl ignored when running on an unconditional network')

    # Generate images.
    for seed_idx, seed in enumerate(seeds):
        print('Generating image for seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
        z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device).float()

        # Construct an inverse rotation/translation matrix and pass it to the generator.
        # The generator expects this matrix as an inverse to avoid potentially failing
        # numerical operations in the network.
        if hasattr(G.synthesis, 'input'):
            m = make_transform(translate, rotate)
            m = np.linalg.inv(m)
            G.synthesis.input.transform.copy_(torch.from_numpy(m))

        img = G(z, label, truncation_psi=truncation_psi, noise_mode=noise_mode)
        # Map generator output from roughly [-1, 1] to uint8 [0, 255] in NHWC layout.
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB').save(f'{outdir}/seed{seed:04d}.png')


def image_generation(model, number_of_images=1):
    # Placeholder Gradio callback: loads the generator but only reports what it
    # would do; it does not return images yet.
    G = MyGenerator.from_pretrained("Cropinky/projected_gan_impressionism")
    return f"generating {number_of_images} images from {model}"
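

# The Gradio callback above is only a stub: it loads a generator but returns a
# status string. Below is a minimal sketch of what an image-returning callback
# could look like. It is an assumption, not the original app's behavior: the
# repo-id naming scheme and the helper name `generate_gallery` are hypothetical,
# and the Interface's outputs would have to change from "text" to an image or
# gallery component for this to be wired in.
def generate_gallery(model: str, number_of_images: int = 1):
    # Hypothetical mapping, e.g. "Impressionism" -> "Cropinky/projected_gan_impressionism".
    repo_id = f"Cropinky/projected_gan_{model.lower().replace(' ', '_')}"
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    G = MyGenerator.from_pretrained(repo_id).to(device)
    images = []
    for seed in range(number_of_images):
        # Same sampling and uint8 conversion as in generate_images above.
        z = torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device).float()
        label = torch.zeros([1, G.c_dim], device=device)  # unconditional: c_dim == 0
        img = G(z, label, truncation_psi=1.0, noise_mode='const')
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        images.append(PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB'))
    return images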


if __name__ == "__main__":
    inputs = gr.inputs.Radio([
        "Abstract Expressionism", "Impressionism", "Cubism", "Minimalism",
        "Pop Art", "Color Field", "Hana Hanak houses",
    ])
    # outputs = gr.outputs.Image(label="Output Image")
    outputs = "text"
    title = "Projected GAN for painting generation"
    description = "Choose your artistic direction"
    # NOTE: the original HTML link target was lost from this file; the official
    # Projected GAN repository URL below is an assumption.
    article = "<p><a href='https://github.com/autonomousvision/projected-gan'>Official projected GAN github repo + paper</a></p>"
    iface = gr.Interface(
        image_generation,
        inputs,
        outputs,
        title=title,
        article=article,
        description=description,
        analytics_enabled=False,
    )
    app, local_url, share_url = iface.launch(debug=True)