ruslanmv committed on
Commit
4ebc629
·
1 Parent(s): 92717ee
Files changed (2) hide show
  1. app.py +58 -39
  2. requirements.txt +1 -0
app.py CHANGED
@@ -1,6 +1,5 @@
1
  run_api = False
2
  SSD_1B = False
3
-
4
  import os
5
 
6
  # Use GPU
@@ -13,6 +12,7 @@ else:
13
  is_gpu = True
14
  print(is_gpu)
15
 
 
16
  from IPython.display import clear_output
17
 
18
 
@@ -38,6 +38,7 @@ def check_enviroment():
38
  # Call the function to check and install Packages if necessary
39
  check_enviroment()
40
 
 
41
  from IPython.display import clear_output
42
  import os
43
  import gradio as gr
@@ -70,51 +71,50 @@ else:
70
  # Uncomment the following line if you want to use CPU instead of GPU
71
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
72
 
73
- if torch.cuda.is_available():
74
- # Get the current directory
75
- current_dir = os.getcwd()
76
- model_path = os.path.join(current_dir)
77
 
78
- # Set the cache path
79
- cache_path = os.path.join(current_dir, "cache")
 
 
 
 
80
 
81
- if not SSD_1B:
82
 
83
- unet = UNet2DConditionModel.from_pretrained(
84
- "latent-consistency/lcm-sdxl",
85
- torch_dtype=torch.float16,
86
- variant="fp16",
87
- cache_dir=cache_path,
88
- )
89
- pipe = DiffusionPipeline.from_pretrained(
90
- "stabilityai/stable-diffusion-xl-base-1.0",
91
- unet=unet,
92
- torch_dtype=torch.float16,
93
- variant="fp16",
94
- cache_dir=cache_path,
95
- )
96
 
97
- pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 
98
  pipe.to("cuda")
99
- else:
100
- # SSD-1B
101
- from diffusers import LCMScheduler, AutoPipelineForText2Image
102
-
103
- pipe = AutoPipelineForText2Image.from_pretrained(
104
- "segmind/SSD-1B",
105
- torch_dtype=torch.float16,
106
- variant="fp16",
107
- cache_dir=cache_path,
108
- )
109
- pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 
110
  pipe.to("cuda")
111
 
112
- # load and fuse
113
- pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
114
- pipe.fuse_lora()
115
-
116
- else:
117
- pipe = None
118
 
119
 
120
  def generate(
@@ -149,6 +149,25 @@ def generate(
149
 
150
  clear_output()
151
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
152
  if not run_api:
153
  secret_token = gr.Text(
154
  label="Secret Token",
 
1
  run_api = False
2
  SSD_1B = False
 
3
  import os
4
 
5
  # Use GPU
 
12
  is_gpu = True
13
  print(is_gpu)
14
 
15
+
16
  from IPython.display import clear_output
17
 
18
 
 
38
  # Call the function to check and install Packages if necessary
39
  check_enviroment()
40
 
41
+
42
  from IPython.display import clear_output
43
  import os
44
  import gradio as gr
 
71
  # Uncomment the following line if you want to use CPU instead of GPU
72
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
73
 
 
 
 
 
74
 
75
+ # Get the current directory
76
+ current_dir = os.getcwd()
77
+ model_path = os.path.join(current_dir)
78
+
79
+ # Set the cache path
80
+ cache_path = os.path.join(current_dir, "cache")
81
 
82
+ if not SSD_1B:
83
 
84
+ unet = UNet2DConditionModel.from_pretrained(
85
+ "latent-consistency/lcm-sdxl",
86
+ torch_dtype=torch.float16,
87
+ variant="fp16",
88
+ cache_dir=cache_path,
89
+ )
90
+ pipe = DiffusionPipeline.from_pretrained(
91
+ "stabilityai/stable-diffusion-xl-base-1.0",
92
+ unet=unet,
93
+ torch_dtype=torch.float16,
94
+ variant="fp16",
95
+ cache_dir=cache_path,
96
+ )
97
 
98
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
99
+ if torch.cuda.is_available():
100
  pipe.to("cuda")
101
+ else:
102
+ # SSD-1B
103
+ from diffusers import LCMScheduler, AutoPipelineForText2Image
104
+
105
+ pipe = AutoPipelineForText2Image.from_pretrained(
106
+ "segmind/SSD-1B",
107
+ torch_dtype=torch.float16,
108
+ variant="fp16",
109
+ cache_dir=cache_path,
110
+ )
111
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
112
+ if torch.cuda.is_available():
113
  pipe.to("cuda")
114
 
115
+ # load and fuse
116
+ pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
117
+ pipe.fuse_lora()
 
 
 
118
 
119
 
120
  def generate(
 
149
 
150
  clear_output()
151
 
152
+ from IPython.display import display
153
+
154
+
155
+ def generate_image(prompt="A beautiful and sexy girl"):
156
+ # Generate the image using the prompt
157
+ generated_image = generate(
158
+ prompt=prompt,
159
+ negative_prompt="",
160
+ seed=0,
161
+ width=1024,
162
+ height=1024,
163
+ guidance_scale=0.0,
164
+ num_inference_steps=4,
165
+ secret_token="default_secret", # Replace with your secret token
166
+ )
167
+ # Display the image in the Jupyter Notebook
168
+ display(generated_image)
169
+
170
+
171
  if not run_api:
172
  secret_token = gr.Text(
173
  label="Secret Token",
requirements.txt CHANGED
@@ -5,3 +5,4 @@ invisible-watermark==0.2.0
5
  Pillow==10.1.0
6
  torch==2.1.0
7
  transformers==4.35.0
 
 
5
  Pillow==10.1.0
6
  torch==2.1.0
7
  transformers==4.35.0
8
+ ipython