Commit 86eb84c
Parent(s): (none)
image: Initial.
Signed-off-by: Hadad <hadad@linuxmail.org>

Files changed:
- .gitattributes +35 -0
- Dockerfile +22 -0
- README.md +87 -0
- app.py +60 -0
- config.py +49 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
Dockerfile
ADDED
@@ -0,0 +1,22 @@
+#
+# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
+# SPDX-License-Identifier: Apache-2.0
+#
+
+# Use a specific container image for the app
+FROM python:latest
+
+# Set the main working directory inside the container
+WORKDIR /app
+
+# Copy all files into the container
+COPY . .
+
+# Install all dependencies
+RUN pip install -r requirements.txt
+
+# Open the port so the app can be accessed
+EXPOSE 7860
+
+# Start the app
+CMD ["python", "app.py"]
README.md
ADDED
@@ -0,0 +1,87 @@
+---
+title: Image Generation Playground
+short_description: Turn your ideas into an image
+license: apache-2.0
+emoji: ⚡
+colorFrom: indigo
+colorTo: purple
+sdk: docker
+app_port: 7860
+pinned: false
+# Used to promote this Hugging Face Space
+models:
+- hadadrjt/JARVIS
+- agentica-org/DeepCoder-14B-Preview
+- agentica-org/DeepSWE-Preview
+- fka/awesome-chatgpt-prompts
+- black-forest-labs/FLUX.1-Kontext-dev
+- ChatDOC/OCRFlux-3B
+- deepseek-ai/DeepSeek-R1
+- deepseek-ai/DeepSeek-R1-0528
+- deepseek-ai/DeepSeek-R1-Distill-Llama-70B
+- deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+- deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
+- deepseek-ai/DeepSeek-V3-0324
+- google/gemma-3-1b-it
+- google/gemma-3-27b-it
+- google/gemma-3-4b-it
+- google/gemma-3n-E4B-it
+- google/gemma-3n-E4B-it-litert-preview
+- google/medsiglip-448
+- kyutai/tts-1.6b-en_fr
+- meta-llama/Llama-3.1-8B-Instruct
+- meta-llama/Llama-3.2-3B-Instruct
+- meta-llama/Llama-3.3-70B-Instruct
+- meta-llama/Llama-4-Maverick-17B-128E-Instruct
+- meta-llama/Llama-4-Scout-17B-16E-Instruct
+- microsoft/Phi-4-mini-instruct
+- mistralai/Devstral-Small-2505
+- mistralai/Mistral-Small-3.1-24B-Instruct-2503
+- openai/webgpt_comparisons
+- openai/whisper-large-v3-turbo
+- openai/gpt-oss-120b
+- openai/gpt-oss-20b
+- Qwen/QwQ-32B
+- Qwen/Qwen2.5-VL-32B-Instruct
+- Qwen/Qwen2.5-VL-3B-Instruct
+- Qwen/Qwen2.5-VL-72B-Instruct
+- Qwen/Qwen3-235B-A22B
+- THUDM/GLM-4.1V-9B-Thinking
+- tngtech/DeepSeek-TNG-R1T2-Chimera
+- moonshotai/Kimi-K2-Instruct
+- Qwen/Qwen3-235B-A22B-Instruct-2507
+- Qwen/Qwen3-Coder-480B-A35B-Instruct
+- Qwen/Qwen3-235B-A22B-Thinking-2507
+- zai-org/GLM-4.5
+- zai-org/GLM-4.5-Air
+- zai-org/GLM-4.5V
+- deepseek-ai/DeepSeek-V3.1
+- deepseek-ai/DeepSeek-V3.1-Base
+- microsoft/VibeVoice-1.5B
+- xai-org/grok-2
+- Qwen/Qwen-Image-Edit
+- ByteDance-Seed/Seed-OSS-36B-Instruct
+- google/gemma-3-270m
+- google/gemma-3-270m-it
+- openbmb/MiniCPM-V-4_5
+- tencent/Hunyuan-MT-7B
+- meituan-longcat/LongCat-Flash-Chat
+- Phr00t/WAN2.2-14B-Rapid-AllInOne
+- apple/FastVLM-0.5B
+- stepfun-ai/Step-Audio-2-mini
+- tencent/SRPO
+- baidu/ERNIE-4.5-21B-A3B-Thinking
+- tencent/HunyuanImage-2.1
+- Qwen/Qwen3-Next-80B-A3B-Instruct
+- google/embeddinggemma-300m
+- Qwen/Qwen3-Next-80B-A3B-Thinking
+- LLM360/K2-Think
+- IndexTeam/IndexTTS-2
+- Turn your ideas into an image
+- facebook/MobileLLM-R1-950M
+- Alibaba-NLP/Tongyi-DeepResearch-30B-A3B
+- openbmb/VoxCPM-0.5B
+# Used to promote this Hugging Face Space
+datasets:
+- fka/awesome-chatgpt-prompts
+---
app.py
ADDED
@@ -0,0 +1,60 @@
+#
+# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
+# SPDX-License-Identifier: Apache-2.0
+#
+
+import os
+import openai
+import base64
+from io import BytesIO
+from PIL import Image
+from config import MODEL, SIZE, DESCRIPTION, EXAMPLES
+import gradio as gr
+
+def playground(model, size, prompt):
+    client = openai.OpenAI(
+        base_url=os.getenv("OPENAI_API_BASE_URL"),
+        api_key=os.getenv("OPENAI_API_KEY")
+    )
+
+    response = client.images.generate(
+        model=model,
+        prompt=prompt,
+        size=size,
+        n=1,
+        response_format="b64_json"
+    )
+
+    return Image.open(
+        BytesIO(
+            base64.b64decode(
+                response.data[0].b64_json
+            )
+        )
+    )
+
+with gr.Blocks() as app:
+    with gr.Sidebar():
+        gr.HTML(DESCRIPTION)
+    gr.Interface(
+        fn=playground,
+        inputs=[
+            gr.Dropdown(MODEL, label="MODEL"),
+            gr.Dropdown(SIZE, label="IMAGE SIZE"),
+            gr.Textbox(
+                label="INSTRUCTIONS",
+                placeholder="Insert your prompt here..."
+            )
+        ],
+        outputs=gr.Image(
+            label="GENERATED IMAGE", type="pil"
+        ),
+        examples=EXAMPLES,
+        show_api=False,
+        concurrency_limit=3
+    )
+
+app.launch(
+    server_name="0.0.0.0",
+    pwa=True
+)
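
Reviewer note (not part of this commit): app.py expects OPENAI_API_BASE_URL and OPENAI_API_KEY to point at an OpenAI-compatible images endpoint and decodes the returned b64_json payload with PIL. The sketch below exercises the same request flow without launching Gradio; the model name, size, and prompt are illustrative placeholders, and the endpoint and key are whatever you export in the environment.

# Minimal backend smoke test mirroring playground() in app.py.
# Assumes OPENAI_API_BASE_URL and OPENAI_API_KEY are exported and point at
# an OpenAI-compatible images API that serves the "flux" model from config.py.
import base64
import os
from io import BytesIO

import openai
from PIL import Image

client = openai.OpenAI(
    base_url=os.getenv("OPENAI_API_BASE_URL"),
    api_key=os.getenv("OPENAI_API_KEY"),
)

response = client.images.generate(
    model="flux",                       # one of config.MODEL
    prompt="A lighthouse on a cliff at sunset",
    size="1024x1024",                   # one of config.SIZE
    n=1,
    response_format="b64_json",
)

# Decode the base64 payload into a PIL image and save it locally.
image = Image.open(BytesIO(base64.b64decode(response.data[0].b64_json)))
image.save("output.png")

If this call succeeds, the Gradio wiring in app.py only adds the dropdowns and the image output on top of it.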
config.py
ADDED
@@ -0,0 +1,49 @@
+#
+# SPDX-FileCopyrightText: Hadad <hadad@linuxmail.org>
+# SPDX-License-Identifier: Apache-2.0
+#
+
+MODEL = [
+    "flux", "kontext", "turbo"
+]
+
+SIZE = [
+    "256x256", "512x512", "1024x1024",
+    "512x768", "768x1024", "1024x1536",
+    "768x512", "1024x768", "1536x1024"
+]
+
+EXAMPLES = [
+    ["flux", "1024x1024", "A woman riding a horse in the forest"],
+    ["kontext", "1024x1024",
+     "A cozy cabin in the snowy mountains, warm lights glowing inside"],
+    ["turbo", "1024x1024",
+     "A cute cat astronaut floating in space, holding a little flag"],
+    ["kontext", "1024x1024",
+     "An ancient library filled with floating books and magical lights"]
+]
+
+DESCRIPTION = """
+Welcome to the <b>Image Generation Playground</b>, where imagination transforms
+into visual creations.
+<br><br>
+
+For your information, <b>image generation</b> and <b>OpenAI Audio TTS</b>
+are already built natively into the primary Spaces.
+<br><br>
+
+For more advanced capabilities,
+please visit the <b><a href='https://umint-openwebui.hf.space'
+target='_blank'>UltimaX Intelligence</a></b> primary Spaces.
+<br><br>
+
+Please consider reading the <b><a href=
+'https://huggingface.co/spaces/umint/ai/discussions/37#68b55209c51ca52ed299db4c'
+target='_blank'>Terms of Use and Consequences of Violation</a></b>
+if you wish to proceed to the main Spaces.
+<br><br>
+
+<b>Like this project? Feel free to buy me a
+<a href='https://ko-fi.com/hadad' target='_blank'>
+coffee</a></b>.
+"""
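
Reviewer note (not part of this commit): each EXAMPLES row is fed directly to playground(model, size, prompt), so the first two fields should stay within MODEL and SIZE or a clicked example will not pre-select valid dropdown values. A small sketch that checks that invariant:

# Sanity-check that every example row uses a known model and size.
from config import EXAMPLES, MODEL, SIZE

for model, size, prompt in EXAMPLES:
    assert model in MODEL, f"unknown model in EXAMPLES: {model}"
    assert size in SIZE, f"unknown size in EXAMPLES: {size}"
    print(f"ok: {model} @ {size} :: {prompt[:40]}")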
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+gradio[oauth,mcp]
+openai