Spaces · status: Runtime error
SuperSecureHuman committed: initial commit
Files changed:
- app.py (+117, -0)
- trained.h5 (+3, -0)
app.py
ADDED
@@ -0,0 +1,117 @@
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import gradio as gr

# Define EDSR custom model


class EDSRModel(tf.keras.Model):
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data

        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)

        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}

    def predict_step(self, x):
        # Add a batch dimension with tf.expand_dims and convert to float32 with tf.cast
        x = tf.cast(tf.expand_dims(x, axis=0), tf.float32)
        # Pass the low-resolution image through the model
        super_resolution_img = self(x, training=False)
        # Clip pixel values to the valid range [0, 255]
        super_resolution_img = tf.clip_by_value(super_resolution_img, 0, 255)
        # Round the values to the nearest integer
        super_resolution_img = tf.round(super_resolution_img)
        # Remove the batch dimension and convert to uint8
        super_resolution_img = tf.squeeze(
            tf.cast(super_resolution_img, tf.uint8), axis=0
        )
        return super_resolution_img


# Residual Block
def ResBlock(inputs):
    x = layers.Conv2D(64, 3, padding="same", activation="relu")(inputs)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.Add()([inputs, x])
    return x


# Upsampling Block
def Upsampling(inputs, factor=2, **kwargs):
    x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(inputs)
    x = tf.nn.depth_to_space(x, block_size=factor)
    x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(x)
    x = tf.nn.depth_to_space(x, block_size=factor)
    return x


def make_model(num_filters, num_of_residual_blocks):
    # Flexible input size: any height and width, 3 channels
    input_layer = layers.Input(shape=(None, None, 3))
    # Scale pixel values to [0, 1]
    x = layers.Rescaling(scale=1.0 / 255)(input_layer)
    x = x_new = layers.Conv2D(num_filters, 3, padding="same")(x)

    # Stack of residual blocks
    for _ in range(num_of_residual_blocks):
        x_new = ResBlock(x_new)

    x_new = layers.Conv2D(num_filters, 3, padding="same")(x_new)
    x = layers.Add()([x, x_new])

    x = Upsampling(x)
    x = layers.Conv2D(3, 3, padding="same")(x)

    output_layer = layers.Rescaling(scale=255)(x)
    return EDSRModel(input_layer, output_layer)


# Define PSNR metric
def PSNR(super_resolution, high_resolution):
    """Compute the peak signal-to-noise ratio, a measure of image quality."""
    # Max pixel value is 255
    psnr_value = tf.image.psnr(high_resolution, super_resolution, max_val=255)[0]
    return psnr_value


# Load the trained EDSR model, registering the custom class and metric
custom_objects = {"EDSRModel": EDSRModel}

with keras.utils.custom_object_scope(custom_objects):
    new_model = keras.models.load_model("./trained.h5", custom_objects={"PSNR": PSNR})


def process_image(img):
    lowres = tf.convert_to_tensor(img, dtype=tf.uint8)
    # Randomly crop a 150x150 patch; inputs smaller than 150x150 will raise an error
    lowres = tf.image.random_crop(lowres, (150, 150, 3))
    preds = new_model.predict_step(lowres)
    preds = preds.numpy()
    return preds


image = gr.inputs.Image()
image_out = gr.outputs.Image()

gr.Interface(
    process_image,
    title="EDSR",
    description="SuperResolution",
    inputs=image,
    outputs=image_out,
    interpretation="default",
    allow_flagging="never",
).launch()
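Note: the Space status above is "Runtime error". One plausible, unconfirmed cause is the Gradio API used at the end of app.py: the gr.inputs / gr.outputs namespaces and the interpretation argument were removed in later Gradio releases, so the script fails if the Space installs a current Gradio version. Below is a minimal sketch of the same interface against the newer component API; it assumes process_image from the listing above is defined in scope.

import gradio as gr

# Sketch only: gr.Image replaces the removed gr.inputs.Image / gr.outputs.Image,
# and the `interpretation` argument is dropped because it no longer exists.
demo = gr.Interface(
    fn=process_image,                # function defined in app.py above
    inputs=gr.Image(type="numpy"),   # NumPy array in, matching tf.convert_to_tensor(img, ...)
    outputs=gr.Image(type="numpy"),  # predict_step returns a uint8 array
    title="EDSR",
    description="SuperResolution",
    allow_flagging="never",          # kept from the original; newer releases prefer flagging_mode
)
demo.launch()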
trained.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c89499b79222015964a9f4e29f16801a6e987acdef00e6865c80141cb9e08150
size 18563184
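trained.h5 is stored with Git LFS: the commit contains only the pointer stub above (spec v1, a sha256 oid, and a size of 18563184 bytes, roughly 18.6 MB), while the actual weights live in LFS storage. A minimal sketch of fetching the resolved file with huggingface_hub follows; the repo id is a hypothetical placeholder, since the Space's actual path is not shown on this page.

from huggingface_hub import hf_hub_download

# Hypothetical repo id -- substitute the real "<user>/<space-name>" path.
SPACE_REPO_ID = "SuperSecureHuman/edsr"  # placeholder, assumption

weights_path = hf_hub_download(
    repo_id=SPACE_REPO_ID,
    filename="trained.h5",
    repo_type="space",  # the file lives in a Space repo, not a model repo
)
# weights_path now points to the downloaded ~18.6 MB file rather than the LFS pointer stub.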