ruslanmv committed on
Commit 6fdbc47 · 1 Parent(s): 44b7f30

Update app.py

Files changed (1)
  1. app.py +275 -158
app.py CHANGED
@@ -1,206 +1,323 @@
- # ---- Flags ----
- run_api = False
- SSD_1B = False  # True = use SSD-1B + LCM LoRA, False = SDXL Base + LCM (default)

- # ---- Standard imports ----
  import os
  import subprocess
- import numpy as np

- # Optional: clear_output is nice in notebooks; ignore if not available
- try:
-     from IPython.display import clear_output  # noqa: F401
- except Exception:
-     def clear_output():  # no-op outside notebooks
-         pass

- # ---- Tame NVML noise in containers without GPU drivers (optional) ----
- os.environ.setdefault("DEEPSPEED_DISABLE_NVML", "1")
  import warnings
  warnings.filterwarnings("ignore", message="Can't initialize NVML")

- # ---- App imports (expect deps from requirements.txt already installed) ----
  import torch
- import gradio as gr
  from PIL import Image
- from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler

- # ---- Config / constants ----
- current_dir = os.getcwd()
- cache_path = os.path.join(current_dir, "cache")
- os.makedirs(cache_path, exist_ok=True)

- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
- SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")

- # ---- GPU visibility / info (for logs only) ----
- def print_nvidia_smi():
      try:
-         proc = subprocess.run(["nvidia-smi"], capture_output=True, text=True)
          if proc.returncode == 0 and proc.stdout.strip():
-             print(proc.stdout)
          else:
-             # Show stderr when present to help debugging; not used for logic
-             if proc.stderr:
-                 print(proc.stderr)
-             else:
-                 print("nvidia-smi not available or returned no output.")
      except FileNotFoundError:
-         print("nvidia-smi not found on PATH.")

  print_nvidia_smi()

- # ---- Device + dtype selection (robust) ----
- is_gpu = torch.cuda.is_available()
- print(f"CUDA available: {is_gpu}")
-
- device = torch.device("cuda") if is_gpu else torch.device("cpu")
- dtype = torch.float16 if is_gpu else torch.float32
-
- # ---- Helpers to only pass 'variant' when needed (Diffusers <=0.23 friendly) ----
- def _add_variant(kwargs: dict) -> dict:
-     """Only include 'variant' when running on GPU."""
-     if is_gpu:
-         kwargs = dict(kwargs)  # shallow copy
-         kwargs["variant"] = "fp16"
-     return kwargs
-
- # ---- Pipeline setup ----
- if not SSD_1B:
-     # SDXL base + LCM UNet
-     unet = UNet2DConditionModel.from_pretrained(
-         "latent-consistency/lcm-sdxl",
-         torch_dtype=dtype,
-         cache_dir=cache_path,
-         **_add_variant({})
-     )
-     pipe = DiffusionPipeline.from_pretrained(
-         "stabilityai/stable-diffusion-xl-base-1.0",
-         unet=unet,
-         torch_dtype=dtype,
-         cache_dir=cache_path,
-         **_add_variant({})
-     )
-     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-     pipe.to(device)
- else:
-     # SSD-1B + LCM LoRA
-     from diffusers import AutoPipelineForText2Image
-     pipe = AutoPipelineForText2Image.from_pretrained(
-         "segmind/SSD-1B",
-         torch_dtype=dtype,
-         cache_dir=cache_path,
-         **_add_variant({})
-     )
-     pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
-     pipe.to(device)
-     pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
-     pipe.fuse_lora()

- # ---- Core generate function ----
- def generate(
      prompt: str,
      negative_prompt: str = "",
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
      guidance_scale: float = 0.0,
      num_inference_steps: int = 4,
-     secret_token: str = "",
  ) -> Image.Image:
-     # Token gate
-     if secret_token != SECRET_TOKEN:
-         raise gr.Error("Invalid secret token. Set SECRET_TOKEN on the server or pass the correct token.")

-     # Clamp sizes (avoid OOM on CPU)
      width = int(np.clip(width, 256, MAX_IMAGE_SIZE))
      height = int(np.clip(height, 256, MAX_IMAGE_SIZE))

-     # Deterministic generator on the active device
-     generator = torch.Generator(device=device)
      if seed is not None:
          generator = generator.manual_seed(int(seed))

-     out = pipe(
          prompt=prompt,
          negative_prompt=negative_prompt,
          width=width,
          height=height,
-         guidance_scale=guidance_scale,
          num_inference_steps=num_inference_steps,
          generator=generator,
          output_type="pil",
      )
-     return out.images[0]

- # ---- Optional notebook helper ----
- def generate_image(prompt="A scenic watercolor landscape, mountains at dawn"):
-     img = generate(
          prompt=prompt,
-         negative_prompt="",
-         seed=0,
-         width=1024,
-         height=1024,
-         guidance_scale=0.0,
-         num_inference_steps=4,
-         secret_token=SECRET_TOKEN,
      )
      try:
-         from IPython.display import display
-         display(img)
-     except Exception:
-         pass  # Non-notebook environment
-
- # ---- UI (Gradio 3.39.0 components) ----
- if not run_api:
-     secret_token = gr.Textbox(
-         label="Secret Token",
-         placeholder="Enter your secret token",
-         type="password",
-     )
-     prompt = gr.Textbox(
-         label="Prompt",
-         show_label=True,
-         max_lines=2,
-         placeholder="Enter your prompt",
-     )
-     negative_prompt = gr.Textbox(
-         label="Negative prompt",
-         max_lines=2,
-         placeholder="Enter a negative prompt (optional)",
-     )
-     seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-     width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-     height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-     guidance_scale = gr.Slider(label="Guidance scale", minimum=0, maximum=2, step=0.1, value=0.0)
-     num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=8, step=1, value=4)
-
-     iface = gr.Interface(
-         fn=generate,
-         inputs=[prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps, secret_token],
-         outputs=gr.Image(label="Result"),
-         title="Image Generator (LCM)",
-         description="Fast SDXL/SSD-1B image generation with LCM. Uses CPU if CUDA is unavailable.",
-     )
-     iface.launch()

- if run_api:
-     with gr.Blocks() as demo:
          gr.Markdown(
-             "### REST API for LCM Text-to-Image\n"
-             "Use the `/run` endpoint programmatically with your secret."
          )
-         secret_token = gr.Textbox(label="Secret Token", type="password")
-         prompt = gr.Textbox(label="Prompt")
-         negative_prompt = gr.Textbox(label="Negative prompt")
-         seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
-         width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-         height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
-         guidance_scale = gr.Slider(label="Guidance scale", minimum=0, maximum=2, step=0.1, value=0.0)
-         num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=8, step=1, value=4)
-
-         inputs = [prompt, negative_prompt, seed, width, height, guidance_scale, num_inference_steps, secret_token]
-         prompt.submit(fn=generate, inputs=inputs, outputs=gr.Image(), api_name="run")
-
-         demo.queue(max_size=32).launch(debug=False)

+ # -------------------------------
+ # AI Fast Image Server (Production)
+ # -------------------------------
+
+ from __future__ import annotations
  import os
+ import sys
+ import logging
  import subprocess
+ from typing import Optional
+
+ # ---------- Early, safe env defaults ----------
+ os.environ.setdefault("HF_HUB_ENABLE_HF_TRANSFER", "1")  # faster model downloads
+ os.environ.setdefault("DEEPSPEED_DISABLE_NVML", "1")  # silence NVML in headless envs
+ os.environ.setdefault("BITSANDBYTES_NOWELCOME", "1")
+
+ # ---------- Logging ----------
+ logging.basicConfig(
+     level=os.environ.get("LOG_LEVEL", "INFO").upper(),
+     format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
+     stream=sys.stdout,
+ )
+ log = logging.getLogger("ai-fast-image-server")
+
+ # ---------- Config via ENV ----------
+ # MODEL_BACKEND: sdxl_lcm_unet (heavy), sdxl_lcm_lora (light), ssd1b_lcm_lora (light)
+ MODEL_BACKEND = os.getenv("MODEL_BACKEND", "sdxl_lcm_lora").lower()
+ MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
+ DEFAULT_SIZE = int(os.getenv("DEFAULT_SIZE", "1024"))
+ SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")
+ PORT = int(os.getenv("PORT", "7860"))
+ CONCURRENCY = int(os.getenv("CONCURRENCY", "2"))
+ QUEUE_SIZE = int(os.getenv("QUEUE_SIZE", "32"))
+ ENABLE_SSR = os.getenv("ENABLE_SSR", "false").lower() == "true"  # SSR can be flaky; default off
+
+ # ---------- Imports that require deps ----------
  import warnings
  warnings.filterwarnings("ignore", message="Can't initialize NVML")
+
+ import numpy as np
  import torch
  from PIL import Image
+ import gradio as gr
+ from diffusers import (
+     DiffusionPipeline,
+     UNet2DConditionModel,
+     LCMScheduler,
+     AutoPipelineForText2Image,
+ )
+
+ # ---------- Version guard: Torch 2.1 + NumPy 2.x is incompatible ----------
+ try:
+     _np_major = int(np.__version__.split(".")[0])
+     if torch.__version__.startswith("2.1") and _np_major >= 2:
+         raise RuntimeError(
+             f"Incompatible versions: torch=={torch.__version__} with numpy=={np.__version__}. "
+             "Pin numpy==1.26.4 or upgrade torch to >=2.3."
+         )
+ except Exception as e:
+     log.error(str(e))
+     raise
+
+ # ---------- Paths ----------
+ CURRENT_DIR = os.getcwd()
+ CACHE_DIR = os.path.join(CURRENT_DIR, "cache")
+ os.makedirs(CACHE_DIR, exist_ok=True)
+
+ # ---------- GPU info (logs only) ----------
+ def print_nvidia_smi() -> None:
      try:
+         proc = subprocess.run(["nvidia-smi"], capture_output=True, text=True, check=False)
          if proc.returncode == 0 and proc.stdout.strip():
+             log.info("\n" + proc.stdout.strip())
          else:
+             msg = proc.stderr.strip() if proc.stderr else "nvidia-smi not available or returned no output."
+             log.info(msg)
      except FileNotFoundError:
+         log.info("nvidia-smi not found on PATH.")

  print_nvidia_smi()

+ IS_GPU = torch.cuda.is_available()
+ DEVICE = torch.device("cuda") if IS_GPU else torch.device("cpu")
+ DTYPE = torch.float16 if IS_GPU else torch.float32
+ log.info(f"CUDA available: {IS_GPU} | device={DEVICE} | dtype={DTYPE}")
+
+ # ---------- Torch perf knobs ----------
+ try:
+     if IS_GPU:
+         torch.backends.cuda.matmul.allow_tf32 = True  # safe perf on Ampere+
+         torch.set_float32_matmul_precision("high")
+ except Exception:
+     pass
+
+ # ---------- Helpers ----------
+ def _variant_kwargs() -> dict:
+     # use fp16 repo variants only on GPU
+     return {"variant": "fp16"} if IS_GPU else {}
+
+ def _cpu_safety_settings(pipe: DiffusionPipeline) -> None:
+     # reduce RAM usage and avoid giant VAE allocations on CPU
+     try:
+         pipe.enable_vae_tiling()
+     except Exception:
+         pass
+
+ def _gpu_memory_efficiency(pipe: DiffusionPipeline) -> None:
+     # enable memory-efficient attention when available
+     enabled = False
+     try:
+         pipe.enable_xformers_memory_efficient_attention()
+         enabled = True
+     except Exception:
+         try:
+             pipe.enable_attention_slicing("max")
+             enabled = True
+         except Exception:
+             pass
+     if enabled:
+         try:
+             pipe.enable_vae_tiling()
+         except Exception:
+             pass
+
+ # ---------- Model loading ----------
+ pipe: Optional[DiffusionPipeline] = None
+
+ def load_pipeline() -> DiffusionPipeline:
+     """
+     Load the selected backend with sensible defaults.
+     - sdxl_lcm_unet: SDXL base + full LCM UNet (heavy, high VRAM)
+     - sdxl_lcm_lora: SDXL base + LCM-LoRA (light, recommended)
+     - ssd1b_lcm_lora: SSD-1B + LCM-LoRA (light)
+     """
+     log.info(f"Loading model backend: {MODEL_BACKEND}")
+
+     if MODEL_BACKEND == "sdxl_lcm_unet":
+         # Heavy: downloads ~10 GB UNet; best quality/speed on big GPUs
+         unet = UNet2DConditionModel.from_pretrained(
+             "latent-consistency/lcm-sdxl",
+             torch_dtype=DTYPE,
+             cache_dir=CACHE_DIR,
+             **_variant_kwargs(),
+         )
+         _pipe = DiffusionPipeline.from_pretrained(
+             "stabilityai/stable-diffusion-xl-base-1.0",
+             unet=unet,
+             torch_dtype=DTYPE,
+             cache_dir=CACHE_DIR,
+             **_variant_kwargs(),
+         )
+     elif MODEL_BACKEND == "ssd1b_lcm_lora":
+         _pipe = AutoPipelineForText2Image.from_pretrained(
+             "segmind/SSD-1B",
+             torch_dtype=DTYPE,
+             cache_dir=CACHE_DIR,
+             **_variant_kwargs(),
+         )
+         _pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
+         _pipe.fuse_lora()
+     else:
+         # Default & recommended: SDXL + LCM-LoRA (smaller downloads, good quality)
+         _pipe = DiffusionPipeline.from_pretrained(
+             "stabilityai/stable-diffusion-xl-base-1.0",
+             torch_dtype=DTYPE,
+             cache_dir=CACHE_DIR,
+             **_variant_kwargs(),
+         )
+         _pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+         _pipe.fuse_lora()
+
+     # Use LCM scheduler
+     _pipe.scheduler = LCMScheduler.from_config(_pipe.scheduler.config)
+
+     # Device & memory efficiency
+     _pipe.to(DEVICE)
+     if IS_GPU:
+         _gpu_memory_efficiency(_pipe)
+     else:
+         _cpu_safety_settings(_pipe)
+
+     log.info("Pipeline loaded.")
+     return _pipe
+
+ # warmup lazily
+ def ensure_pipe() -> DiffusionPipeline:
+     global pipe
+     if pipe is None:
+         pipe = load_pipeline()
+     return pipe
+
+ # ---------- HF Spaces GPU decorator (fixes "No @spaces.GPU function detected") ----------
+ try:
+     import spaces  # type: ignore
+     GPU_DECORATOR = spaces.GPU
+     log.info("`spaces` package detected. GPU-decorating inference function.")
+ except Exception:
+     GPU_DECORATOR = lambda f: f  # no-op
+
+ # ---------- Inference ----------
+ @GPU_DECORATOR
+ def generate_image_internal(
      prompt: str,
      negative_prompt: str = "",
+     seed: Optional[int] = 0,
+     width: int = DEFAULT_SIZE,
+     height: int = DEFAULT_SIZE,
      guidance_scale: float = 0.0,
      num_inference_steps: int = 4,
  ) -> Image.Image:
+     _pipe = ensure_pipe()
+
+     # Clamp to safe bounds
      width = int(np.clip(width, 256, MAX_IMAGE_SIZE))
      height = int(np.clip(height, 256, MAX_IMAGE_SIZE))
+     num_inference_steps = int(np.clip(num_inference_steps, 1, 12))
+     guidance_scale = float(np.clip(guidance_scale, 0.0, 2.0))
+
+     # Deterministic generator
+     generator = torch.Generator(device=DEVICE)
      if seed is not None:
          generator = generator.manual_seed(int(seed))

+     result = _pipe(
          prompt=prompt,
          negative_prompt=negative_prompt,
          width=width,
          height=height,
+         guidance_scale=guidance_scale,  # LCM prefers low/no guidance
          num_inference_steps=num_inference_steps,
          generator=generator,
          output_type="pil",
      )
+     return result.images[0]

+ # thin wrapper that enforces the token (kept out of the GPU-decorated function)
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     seed: int = 0,
+     width: int = DEFAULT_SIZE,
+     height: int = DEFAULT_SIZE,
+     guidance_scale: float = 0.0,
+     num_inference_steps: int = 4,
+     secret_token: str = "",
+ ) -> Image.Image:
+     if secret_token != SECRET_TOKEN:
+         raise gr.Error("Invalid secret token. Set SECRET_TOKEN or pass the correct token.")
+     return generate_image_internal(
          prompt=prompt,
+         negative_prompt=negative_prompt,
+         seed=seed,
+         width=width,
+         height=height,
+         guidance_scale=guidance_scale,
+         num_inference_steps=num_inference_steps,
      )
+
+ # ---------- Optional warmup at startup ----------
+ def warmup():
      try:
+         ensure_pipe()
+         _ = generate_image_internal(
+             prompt="A quick warmup prompt, minimal style", seed=42, width=512, height=512, num_inference_steps=2
+         )
+         log.info("Warmup complete.")
+     except Exception as e:
+         log.warning(f"Warmup skipped or failed: {e}")
+
+ if os.getenv("WARMUP", "true").lower() == "true":
+     # Don't block too long on CPU
+     if IS_GPU:
+         warmup()

+ # ---------- Gradio UI (v5) ----------
+ def build_ui() -> gr.Blocks:
+     with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         gr.Markdown("## Image Generator (LCM) — SDXL / SSD-1B")
+
+         with gr.Row():
+             prompt = gr.Textbox(label="Prompt", lines=3, placeholder="Describe the image...")
+             negative = gr.Textbox(label="Negative Prompt", lines=2, placeholder="(optional)")
+
+         with gr.Row():
+             seed = gr.Number(label="Seed", value=0, precision=0)
+             width = gr.Slider(256, MAX_IMAGE_SIZE, value=DEFAULT_SIZE, step=32, label="Width")
+             height = gr.Slider(256, MAX_IMAGE_SIZE, value=DEFAULT_SIZE, step=32, label="Height")
+
+         with gr.Row():
+             guidance = gr.Slider(0.0, 2.0, value=0.0, step=0.1, label="Guidance scale")
+             steps = gr.Slider(1, 12, value=4, step=1, label="Inference steps")
+             token = gr.Textbox(label="Secret Token", type="password", lines=1)
+
+         out = gr.Image(label="Result", height=DEFAULT_SIZE, width=DEFAULT_SIZE)
+         run = gr.Button("Generate", variant="primary")
+
+         inputs = [prompt, negative, seed, width, height, guidance, steps, token]
+         run.click(fn=generate, inputs=inputs, outputs=out, concurrency_limit=CONCURRENCY)
+
+         # Simple health info
          gr.Markdown(
+             f"*Backend:* `{MODEL_BACKEND}` &nbsp; | &nbsp; "
+             f"*Device:* `{DEVICE}` &nbsp; | &nbsp; "
+             f"*dtype:* `{DTYPE}`"
          )
+     return demo
+
+ # ---------- Launch ----------
+ def main():
+     demo = build_ui()
+     # Queue for backpressure and concurrency control
+     demo.queue(max_size=QUEUE_SIZE, default_concurrency_limit=CONCURRENCY)
+     demo.launch(
+         server_name="0.0.0.0",
+         server_port=PORT,
+         show_api=True,
+         ssr_mode=ENABLE_SSR,  # SSR off by default (can be flaky on Spaces)
+         share=False,
+         show_error=True,
+     )
+
+ if __name__ == "__main__":
+     main()
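
For reference, a minimal client-side smoke test of the updated app is sketched below. It assumes the server is running locally on the default port, that `gradio_client` is installed, and that the `run.click` handler keeps Gradio's default API name (the wrapped function's name, i.e. `/generate`); adjust the URL, token, and endpoint name to your deployment.

# Minimal smoke test (assumptions: localhost:7860, default api_name "/generate",
# SECRET_TOKEN exported in the environment; the returned value is a file path).
import os
from gradio_client import Client

client = Client("http://localhost:7860")
result_path = client.predict(
    "A scenic watercolor landscape, mountains at dawn",  # prompt
    "",                                                  # negative_prompt
    0,                                                   # seed
    1024,                                                # width
    1024,                                                # height
    0.0,                                                 # guidance_scale
    4,                                                   # num_inference_steps
    os.environ.get("SECRET_TOKEN", "default_secret"),    # secret_token
    api_name="/generate",
)
print("Image written to:", result_path)

The positional arguments map one-to-one onto the `inputs` list wired to `run.click`; the backend itself is selected before launch via the environment, e.g. `MODEL_BACKEND=ssd1b_lcm_lora`.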