Spaces: Runtime error
- scripts/anime.py +0 -2
- scripts/process_utils.py +1 -4
scripts/anime.py CHANGED

@@ -25,7 +25,6 @@ def init_model(use_local=False):
     model.eval()
 
 # Takes an image as a numpy array, generates a line drawing, and returns it as a numpy array
-# @spaces.GPU
 def generate_sketch(image, clahe_clip=-1, load_size=512):
     """
     Generate sketch image from input image
@@ -67,7 +66,6 @@ def generate_sketch(image, clahe_clip=-1, load_size=512):
     aus_img = tensor_to_img(aus_tensor)
     return aus_img
 
-
 if __name__ == '__main__':
     os.chdir(os.path.dirname("Anime2Sketch/"))
     parser = argparse.ArgumentParser(description='Anime-to-sketch test options.')
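For context, the `# @spaces.GPU` lines removed in this commit refer to the GPU decorator from Hugging Face's `spaces` package, which, on ZeroGPU hardware, attaches a GPU only for the duration of the decorated call. A minimal sketch of how that decorator is typically applied (the function name and body below are illustrative, not code from this repo):

import spaces
import torch

@spaces.GPU  # on a ZeroGPU Space, a GPU is attached only while this call runs
def run_on_gpu(model, batch):
    # Move the model and inputs to CUDA inside the decorated function (illustrative).
    model = model.to("cuda")
    with torch.no_grad():
        return model(batch.to("cuda"))

Outside a ZeroGPU environment the decorator is effectively a no-op, so leaving it commented out mainly matters for Spaces that actually run on ZeroGPU.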
scripts/process_utils.py CHANGED

@@ -17,7 +17,7 @@ from scripts.hf_utils import download_file
 use_local = False
 model = None
 device = None
-torch_dtype = None
+torch_dtype = None
 sotai_gen_pipe = None
 refine_gen_pipe = None
@@ -43,7 +43,6 @@ def initialize(_use_local=False, use_gpu=False, use_dotenv=False):
     print(f"\nDevice: {device}, Local model: {_use_local}\n")
 
     init_model(use_local)
-    # model = load_wd14_tagger_model()
     sotai_gen_pipe = initialize_sotai_model()
     refine_gen_pipe = initialize_refine_model()
@@ -201,7 +200,6 @@ def create_rgba_image(binary_image: np.ndarray, color: list) -> Image.Image:
     rgba_image[:, :, 3] = binary_image
     return Image.fromarray(rgba_image, 'RGBA')
 
-# @spaces.GPU
 def generate_sotai_image(input_image: Image.Image, output_width: int, output_height: int) -> Image.Image:
     input_image = ensure_rgb(input_image)
     global sotai_gen_pipe
@@ -246,7 +244,6 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_height: int) -> Image.Image:
         torch.cuda.empty_cache()
         gc.collect()
 
-# @spaces.GPU
 def generate_refined_image(prompt: str, original_image: Image.Image, output_width: int, output_height: int, weight1: float, weight2: float) -> Image.Image:
     original_image = ensure_rgb(original_image)
     global refine_gen_pipe
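The unchanged context lines above show the module's overall pattern: the device, dtype, and generation pipelines live in module-level globals that `initialize()` populates, and GPU memory is released with `torch.cuda.empty_cache()` plus `gc.collect()` after a generation call. A minimal, self-contained sketch of that pattern under assumed structure (the loader calls are placeholders, not this repo's functions):

import gc
import torch

device = None
torch_dtype = None
sotai_gen_pipe = None
refine_gen_pipe = None

def initialize(use_gpu: bool = False) -> None:
    # Fill in the module-level globals once, as process_utils.initialize() does.
    global device, torch_dtype, sotai_gen_pipe, refine_gen_pipe
    device = "cuda" if use_gpu and torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if device == "cuda" else torch.float32
    # sotai_gen_pipe = initialize_sotai_model()    # placeholder loader
    # refine_gen_pipe = initialize_refine_model()  # placeholder loader

def release_memory() -> None:
    # Release GPU memory after generation, mirroring the cleanup shown in the diff context.
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    gc.collect()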