linoyts HF Staff committed on
Commit
cada4f8
·
verified ·
1 Parent(s): bd0ddb3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -35
app.py CHANGED
@@ -98,13 +98,13 @@ def polish_prompt_hf(prompt, img):
98
  """
99
  # Ensure HF_TOKEN is set
100
  api_key = os.environ.get("HF_TOKEN")
101
- prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"
102
  if not api_key:
103
  print("Warning: HF_TOKEN not set. Falling back to original prompt.")
104
- return original_prompt
105
 
106
  try:
107
  # Initialize the client
 
108
  client = InferenceClient(
109
  provider="cerebras",
110
  api_key=api_key,
@@ -152,7 +152,7 @@ def polish_prompt_hf(prompt, img):
152
  except Exception as e:
153
  print(f"Error during API call to Hugging Face: {e}")
154
  # Fallback to original prompt if enhancement fails
155
- return original_prompt
156
 
157
  # def polish_prompt(prompt, img):
158
  # prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"
@@ -184,37 +184,6 @@ def encode_image(pil_image):
184
  pil_image.save(buffered, format="PNG")
185
  return base64.b64encode(buffered.getvalue()).decode("utf-8")
186
 
187
-
188
- # def api(prompt, img_list, model="qwen-vl-max-latest", kwargs={}):
189
- # import dashscope
190
- # api_key = os.environ.get('DASH_API_KEY')
191
- # if not api_key:
192
- # raise EnvironmentError("DASH_API_KEY is not set")
193
- # assert model in ["qwen-vl-max-latest"], f"Not implemented model {model}"
194
- # sys_promot = "you are a helpful assistant, you should provide useful answers to users."
195
- # messages = [
196
- # {"role": "system", "content": sys_promot},
197
- # {"role": "user", "content": []}]
198
- # for img in img_list:
199
- # messages[1]["content"].append(
200
- # {"image": f"data:image/png;base64,{encode_image(img)}"})
201
- # messages[1]["content"].append({"text": f"{prompt}"})
202
-
203
- # response_format = kwargs.get('response_format', None)
204
-
205
- # response = dashscope.MultiModalConversation.call(
206
- # api_key=api_key,
207
- # model=model, # For example, use qwen-plus here. You can change the model name as needed. Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
208
- # messages=messages,
209
- # result_format='message',
210
- # response_format=response_format,
211
- # )
212
-
213
- # if response.status_code == 200:
214
- # return response.output.choices[0].message.content[0]['text']
215
- # else:
216
- # raise Exception(f'Failed to post: {response}')
217
-
218
  # --- Model Loading ---
219
  dtype = torch.bfloat16
220
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -327,6 +296,12 @@ css = """
327
  margin: 0 auto;
328
  max-width: 1024px;
329
  }
 
 
 
 
 
 
330
  #edit_text{margin-top: -62px !important}
331
  """
332
 
@@ -335,7 +310,7 @@ with gr.Blocks(css=css) as demo:
335
  gr.HTML("""
336
  <div id="logo-title">
337
  <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
338
- <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 96px">Fast, 8-steps with Lightning LoRA</h2>
339
  </div>
340
  """)
341
  gr.Markdown("""
 
98
  """
99
  # Ensure HF_TOKEN is set
100
  api_key = os.environ.get("HF_TOKEN")
 
101
  if not api_key:
102
  print("Warning: HF_TOKEN not set. Falling back to original prompt.")
103
+ return prompt
104
 
105
  try:
106
  # Initialize the client
107
+ prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"
108
  client = InferenceClient(
109
  provider="cerebras",
110
  api_key=api_key,
 
152
  except Exception as e:
153
  print(f"Error during API call to Hugging Face: {e}")
154
  # Fallback to original prompt if enhancement fails
155
+ return prompt
156
 
157
  # def polish_prompt(prompt, img):
158
  # prompt = f"{SYSTEM_PROMPT}\n\nUser Input: {prompt}\n\nRewritten Prompt:"
 
184
  pil_image.save(buffered, format="PNG")
185
  return base64.b64encode(buffered.getvalue()).decode("utf-8")
186
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
187
  # --- Model Loading ---
188
  dtype = torch.bfloat16
189
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
296
  margin: 0 auto;
297
  max-width: 1024px;
298
  }
299
+ #logo-title {
300
+ text-align: center;
301
+ }
302
+ #logo-title img {
303
+ width: 400px;
304
+ }
305
  #edit_text{margin-top: -62px !important}
306
  """
307
 
 
310
  gr.HTML("""
311
  <div id="logo-title">
312
  <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-Image/qwen_image_edit_logo.png" alt="Qwen-Image Edit Logo" width="400" style="display: block; margin: 0 auto;">
313
+ <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 96px">2509 Fast, 8-steps with Lightning LoRA</h2>
314
  </div>
315
  """)
316
  gr.Markdown("""