openfree commited on
Commit
8360e3e
ยท
verified ยท
1 Parent(s): 088a9e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -875
app.py CHANGED
@@ -1,27 +1,18 @@
1
  import gradio as gr
2
- from huggingface_hub import HfApi, create_repo
3
- from git import Repo
4
  import uuid
5
  from slugify import slugify
6
  import os
7
- import sys
8
  import json
9
- import argparse
10
  import subprocess
11
  import tempfile
12
- import textwrap
13
  import requests
14
  import shutil
15
  import time
16
  from pathlib import Path
17
- from typing import Optional, Dict, List, Tuple
18
 
19
- # ========== LFS ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ (์ฒซ ๋ฒˆ์งธ ์ฝ”๋“œ์—์„œ) ========== #
20
  def is_lfs_pointer_file(filepath):
21
- """Check if a file is a Git LFS pointer file."""
22
- # Initialize analysis
23
- analysis = {}
24
-
25
  try:
26
  with open(filepath, 'rb') as f:
27
  header = f.read(100)
@@ -30,66 +21,50 @@ def is_lfs_pointer_file(filepath):
30
  return False
31
 
32
  def remove_lfs_files(folder):
33
- """Remove all LFS pointer files from the repository."""
34
  removed_files = []
35
  for root, dirs, files in os.walk(folder):
36
- # Skip .git directory
37
  if '.git' in root:
38
  continue
39
-
40
  for file in files:
41
  filepath = os.path.join(root, file)
42
  if is_lfs_pointer_file(filepath):
43
  os.remove(filepath)
44
  removed_files.append(filepath.replace(folder + os.sep, ''))
45
-
46
  return removed_files
47
 
48
- # ========== Repository ๋ถ„์„ ํ•จ์ˆ˜ (๋‘ ๋ฒˆ์งธ ์ฝ”๋“œ์—์„œ) ========== #
49
  def analyze_repository(src_path: Path) -> Dict:
50
- """๋ ˆํฌ์ง€ํ† ๋ฆฌ ๊ตฌ์กฐ์™€ ๋‚ด์šฉ์„ ๋ถ„์„ํ•˜์—ฌ ์ •๋ณด ์ถ”์ถœ"""
51
  analysis = {
52
  "has_requirements": False,
53
  "has_readme": False,
54
- "has_setup_py": False,
55
  "main_language": "python",
56
  "key_files": [],
57
  "dependencies": [],
58
  "description": "",
59
- "installation_steps": [],
60
- "usage_examples": [],
61
  "model_files": [],
62
- "data_files": [],
63
- "config_files": [],
64
- "entry_points": []
65
  }
66
 
67
- # requirements.txt ๋ถ„์„
68
  req_file = src_path / "requirements.txt"
69
  if req_file.exists():
70
  analysis["has_requirements"] = True
71
  try:
72
  reqs = req_file.read_text(encoding="utf-8").strip().split("\n")
73
- # ์˜์กด์„ฑ ํ•„ํ„ฐ๋ง ๋ฐ ์ •๋ฆฌ
74
  cleaned_deps = []
75
  for r in reqs:
76
  r = r.strip()
77
  if r and not r.startswith("#"):
78
- # ์ž˜๋ชป๋œ ๋ฒ„์ „ ์ˆ˜์ •
79
  if "opencv-python==4.10.0" in r:
80
  r = "opencv-python>=4.10.0.82"
81
  elif "opencv-python==4.10" in r:
82
  r = "opencv-python>=4.10.0.82"
83
 
84
- # ๋ฒ„์ „ ์ œ์•ฝ์ด ๋„ˆ๋ฌด ์—„๊ฒฉํ•œ ๊ฒฝ์šฐ ์™„ํ™”
85
  if "==" in r and not r.startswith("git+"):
86
  pkg_name = r.split("==")[0]
87
- # ์ค‘์š”ํ•œ ํŒจํ‚ค์ง€๋Š” ๋ฒ„์ „ ์œ ์ง€, ๋‚˜๋จธ์ง€๋Š” >= ๋กœ ๋ณ€๊ฒฝ
88
  if pkg_name.lower() in ["torch", "tensorflow", "transformers", "numpy"]:
89
  cleaned_deps.append(r)
90
  else:
91
  version = r.split("==")[1]
92
- # ๋ฒ„์ „์ด x.y ํ˜•์‹์ด๋ฉด x.y.0์œผ๋กœ ๋ณ€๊ฒฝ
93
  if version.count('.') == 1:
94
  version = version + ".0"
95
  cleaned_deps.append(f"{pkg_name}>={version}")
@@ -99,62 +74,34 @@ def analyze_repository(src_path: Path) -> Dict:
99
  except:
100
  analysis["dependencies"] = []
101
 
102
- # README ๋ถ„์„
103
  for readme_name in ["README.md", "readme.md", "README.rst", "README.txt"]:
104
  readme_file = src_path / readme_name
105
  if readme_file.exists():
106
  analysis["has_readme"] = True
107
  try:
108
  readme_content = readme_file.read_text(encoding="utf-8")
109
- analysis["readme_content"] = readme_content[:5000] # ์ฒ˜์Œ 5000์ž๋งŒ
110
-
111
- # ์„ค๋ช… ์ถ”์ถœ
112
  lines = readme_content.split("\n")
113
  for i, line in enumerate(lines[:10]):
114
  if line.strip() and not line.startswith("#") and not line.startswith("!"):
115
  analysis["description"] = line.strip()
116
  break
117
-
118
- # ์„ค์น˜ ๋ฐฉ๋ฒ• ์ฐพ๊ธฐ
119
- install_section = False
120
- usage_section = False
121
- for line in lines:
122
- if "install" in line.lower() and "#" in line:
123
- install_section = True
124
- usage_section = False
125
- continue
126
- elif "usage" in line.lower() and "#" in line:
127
- usage_section = True
128
- install_section = False
129
- continue
130
- elif "#" in line:
131
- install_section = False
132
- usage_section = False
133
-
134
- if install_section and line.strip():
135
- analysis["installation_steps"].append(line.strip())
136
- elif usage_section and line.strip():
137
- analysis["usage_examples"].append(line.strip())
138
  except:
139
  pass
140
 
141
- # ์ฃผ์š” Python ํŒŒ์ผ ์ฐพ๊ธฐ
142
  py_files = list(src_path.glob("**/*.py"))
143
- for py_file in py_files[:20]: # ์ตœ๋Œ€ 20๊ฐœ๋งŒ ๋ถ„์„
144
  if "__pycache__" not in str(py_file) and ".git" not in str(py_file):
145
  relative_path = py_file.relative_to(src_path)
146
 
147
- # ์—”ํŠธ๋ฆฌ ํฌ์ธํŠธ ํ›„๋ณด ์ฐพ๊ธฐ
148
  if any(name in py_file.name for name in ["main.py", "app.py", "demo.py", "run.py", "server.py", "streamlit_app.py"]):
149
  analysis["entry_points"].append(str(relative_path))
150
 
151
- # ํŒŒ์ผ ๋‚ด์šฉ ๊ฐ„๋‹จํžˆ ํ™•์ธ
152
  try:
153
  content = py_file.read_text(encoding="utf-8")[:1000]
154
  if "if __name__" in content and "main" in content:
155
  analysis["entry_points"].append(str(relative_path))
156
 
157
- # ์ฃผ์š” import ํ™•์ธ
158
  if any(lib in content for lib in ["torch", "tensorflow", "transformers", "numpy", "pandas", "cv2", "PIL"]):
159
  analysis["key_files"].append({
160
  "path": str(relative_path),
@@ -163,7 +110,6 @@ def analyze_repository(src_path: Path) -> Dict:
163
  except:
164
  pass
165
 
166
- # ๋ชจ๋ธ ํŒŒ์ผ ์ฐพ๊ธฐ
167
  model_extensions = [".pth", ".pt", ".ckpt", ".h5", ".pb", ".onnx", ".safetensors"]
168
  for ext in model_extensions:
169
  model_files = list(src_path.glob(f"**/*{ext}"))
@@ -171,7 +117,6 @@ def analyze_repository(src_path: Path) -> Dict:
171
  if ".git" not in str(mf):
172
  analysis["model_files"].append(str(mf.relative_to(src_path)))
173
 
174
- # ์„ค์ • ํŒŒ์ผ ์ฐพ๊ธฐ
175
  config_patterns = ["config.json", "config.yaml", "config.yml", "*.json", "*.yaml"]
176
  for pattern in config_patterns:
177
  config_files = list(src_path.glob(pattern))
@@ -181,63 +126,7 @@ def analyze_repository(src_path: Path) -> Dict:
181
 
182
  return analysis
183
 
184
- # ========== Brave Search ํ—ฌํผ (๋‘ ๋ฒˆ์งธ ์ฝ”๋“œ์—์„œ) ========== #
185
- def search_repo_info(repo_url: str) -> str:
186
- """Brave Search๋กœ ๋ ˆํฌ์ง€ํ† ๋ฆฌ ์ •๋ณด ์ˆ˜์ง‘"""
187
- api_key = os.getenv("BAPI_TOKEN")
188
- if not api_key:
189
- return ""
190
-
191
- api_key = api_key.strip()
192
- headers = {"X-Subscription-Token": api_key, "Accept": "application/json"}
193
-
194
- # ๋ ˆํฌ์ง€ํ† ๋ฆฌ ์ด๋ฆ„ ์ถ”์ถœ
195
- repo_parts = repo_url.rstrip("/").split("/")
196
- if len(repo_parts) >= 2:
197
- repo_name = f"{repo_parts[-2]}/{repo_parts[-1]}"
198
- else:
199
- return ""
200
-
201
- # ๊ฒ€์ƒ‰ ์ฟผ๋ฆฌ๋“ค
202
- queries = [
203
- f'"{repo_name}" github tutorial',
204
- f'"{repo_name}" usage example',
205
- f'"{repo_name}" gradio streamlit demo'
206
- ]
207
-
208
- search_results = []
209
- for query in queries:
210
- params = {"q": query, "count": 3}
211
- try:
212
- resp = requests.get(
213
- "https://api.search.brave.com/res/v1/web/search",
214
- headers=headers,
215
- params=params,
216
- timeout=10
217
- )
218
- if resp.status_code == 200:
219
- results = resp.json().get("web", {}).get("results", [])
220
- for r in results:
221
- search_results.append({
222
- "title": r.get("title", ""),
223
- "description": r.get("description", ""),
224
- "url": r.get("url", "")
225
- })
226
- except:
227
- continue
228
-
229
- # ๊ฒ€์ƒ‰ ๊ฒฐ๊ณผ๋ฅผ ํ…์ŠคํŠธ๋กœ ๋ณ€ํ™˜
230
- search_text = f"Search results for {repo_name}:\n"
231
- for r in search_results[:5]:
232
- search_text += f"\n- {r['title']}: {r['description']}\n"
233
-
234
- return search_text
235
-
236
- # ========== AI ์ƒ์„ฑ ํ—ฌํผ (๋‘ ๋ฒˆ์งธ ์ฝ”๋“œ์—์„œ) ========== #
237
- def generate_gradio_app(repo_url: str, analysis: Dict, search_info: str = "") -> Dict:
238
- """AI๋กœ ์‹ค์ œ ๋™์ž‘ํ•˜๋Š” Gradio ์•ฑ ์ƒ์„ฑ"""
239
-
240
- # ์ปจํ…์ŠคํŠธ ์ค€๋น„
241
  context = f"""Repository URL: {repo_url}
242
 
243
  Repository Analysis:
@@ -256,22 +145,6 @@ Key Files Found:
256
  if analysis.get('readme_content'):
257
  context += f"\n--- README.md (excerpt) ---\n{analysis['readme_content'][:2000]}\n"
258
 
259
- if search_info:
260
- context += f"\n--- Web Search Results ---\n{search_info}\n"
261
-
262
- # Installation steps
263
- if analysis['installation_steps']:
264
- context += f"\nInstallation Steps:\n"
265
- for step in analysis['installation_steps'][:5]:
266
- context += f"- {step}\n"
267
-
268
- # Usage examples
269
- if analysis['usage_examples']:
270
- context += f"\nUsage Examples:\n"
271
- for ex in analysis['usage_examples'][:5]:
272
- context += f"- {ex}\n"
273
-
274
- # System prompt
275
  system_prompt = """You are an expert at creating Gradio apps from GitHub repositories.
276
  Your task is to generate a complete, working Gradio interface that demonstrates the main functionality of the repository.
277
 
@@ -281,50 +154,43 @@ CRITICAL REQUIREMENTS:
281
  3. Handle errors gracefully with clear user feedback
282
  4. Include API key inputs when external services are required
283
  5. Create intuitive UI components for the main features
284
- 6. Include helpful descriptions and examples
285
- 7. Always use gradio>=5.35.0
286
- 8. If the project requires external APIs (OpenAI, Anthropic, etc), include:
287
- - API key input fields
288
- - Clear instructions on how to obtain keys
289
- - Environment variable setup guidance
290
- - Graceful handling when keys are missing
291
 
292
  Return ONLY valid JSON with these exact keys:
293
  - app_py: Complete Gradio app code
294
  - requirements_txt: All necessary dependencies including gradio>=5.35.0
295
  - summary: Brief description of what the app does"""
296
 
297
- # OpenAI ์‹œ๋„
298
- openai_key = os.getenv("OPENAI_API_KEY")
299
- if openai_key:
300
  try:
301
- headers = {
302
- "Authorization": f"Bearer {openai_key.strip()}",
303
- "Content-Type": "application/json"
304
- }
305
-
306
  payload = {
307
- "model": "gpt-4o-mini",
 
 
 
 
 
 
308
  "messages": [
309
  {"role": "system", "content": system_prompt},
310
  {"role": "user", "content": f"Create a fully functional Gradio app for this repository:\n\n{context[:8000]}"}
311
- ],
312
- "temperature": 0.3,
313
- "max_tokens": 4000
 
 
 
314
  }
315
 
316
- r = requests.post(
317
- "https://api.openai.com/v1/chat/completions",
318
- json=payload,
319
- headers=headers,
320
- timeout=30
321
- )
322
 
323
  if r.status_code == 200:
324
  response_text = r.json()["choices"][0]["message"]["content"]
325
- print("โœ… OpenAI API๋กœ ์Šค๋งˆํŠธ ์•ฑ ์ƒ์„ฑ ์„ฑ๊ณต")
326
 
327
- # JSON ํŒŒ์‹ฑ
328
  try:
329
  if "```json" in response_text:
330
  start = response_text.find("```json") + 7
@@ -349,76 +215,25 @@ Return ONLY valid JSON with these exact keys:
349
  print(f"โš ๏ธ JSON ํŒŒ์‹ฑ ์˜ค๋ฅ˜: {e}")
350
  return None
351
  except Exception as e:
352
- print(f"โš ๏ธ OpenAI API ์˜ค๋ฅ˜: {e}")
353
 
354
- # Friendli ์‹œ๋„
355
- friendli_token = os.getenv("FRIENDLI_TOKEN")
356
- if friendli_token:
357
- try:
358
- headers = {
359
- "Authorization": f"Bearer {friendli_token.strip()}",
360
- "Content-Type": "application/json"
361
- }
362
-
363
- payload = {
364
- "model": "meta-llama-3.1-70b-instruct",
365
- "messages": [
366
- {"role": "system", "content": system_prompt},
367
- {"role": "user", "content": f"Create a Gradio app:\n{context[:6000]}"}
368
- ],
369
- "max_tokens": 4000,
370
- "temperature": 0.3
371
- }
372
-
373
- for endpoint in [
374
- "https://api.friendli.ai/v1/chat/completions",
375
- "https://api.friendli.ai/dedicated/v1/chat/completions"
376
- ]:
377
- r = requests.post(endpoint, json=payload, headers=headers, timeout=30)
378
- if r.status_code == 200:
379
- response_text = r.json()["choices"][0]["message"]["content"]
380
- print("โœ… Friendli API๋กœ ์Šค๋งˆํŠธ ์•ฑ ์ƒ์„ฑ ์„ฑ๊ณต")
381
-
382
- if "```json" in response_text:
383
- start = response_text.find("```json") + 7
384
- end = response_text.find("```", start)
385
- response_text = response_text[start:end].strip()
386
-
387
- result = json.loads(response_text)
388
-
389
- if "gradio" not in result.get("requirements_txt", "").lower():
390
- result["requirements_txt"] = "gradio>=5.35.0\n" + result.get("requirements_txt", "")
391
-
392
- return result
393
- except Exception as e:
394
- print(f"โš ๏ธ Friendli API ์˜ค๋ฅ˜: {e}")
395
-
396
- # ์Šค๋งˆํŠธ ๊ธฐ๋ณธ ํ…œํ”Œ๋ฆฟ ์ƒ์„ฑ
397
- print("โ„น๏ธ AI API๊ฐ€ ์—†์–ด ์Šค๋งˆํŠธ ๊ธฐ๋ณธ ํ…œํ”Œ๋ฆฟ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.")
398
  return create_smart_template(repo_url, analysis)
399
 
400
  def create_smart_template(repo_url: str, analysis: Dict) -> Dict:
401
- """๋ถ„์„ ๊ฒฐ๊ณผ๋ฅผ ๋ฐ”ํƒ•์œผ๋กœ ์Šค๋งˆํŠธํ•œ ๊ธฐ๋ณธ ํ…œํ”Œ๋ฆฟ ์ƒ์„ฑ"""
402
-
403
  repo_name = Path(repo_url.rstrip("/")).name
404
  description = analysis.get("description", "A project deployed from GitHub") if analysis else "A project deployed from GitHub"
405
 
406
- # ์˜์กด์„ฑ ๊ธฐ๋ฐ˜ ์•ฑ ํƒ€์ž… ๊ฒฐ์ •
407
  deps = " ".join(analysis.get("dependencies", [])) if analysis else ""
408
- has_ml = any(lib in deps for lib in ["torch", "tensorflow", "transformers", "scikit-learn"])
409
  has_cv = any(lib in deps for lib in ["cv2", "PIL", "pillow", "opencv"])
410
  has_nlp = any(lib in deps for lib in ["transformers", "nltk", "spacy"])
411
- has_audio = any(lib in deps for lib in ["librosa", "soundfile", "pyaudio"])
412
  has_3d = any(lib in deps for lib in ["gaussian", "rasterizer", "plyfile", "trimesh"])
413
 
414
- # ๊ธฐ๋ณธ requirements - git ์˜์กด์„ฑ ์ œ์™ธ
415
  requirements = ["gradio>=5.35.0"]
416
  if analysis and analysis.get("dependencies"):
417
- # git+ ์˜์กด์„ฑ๊ณผ ๋กœ์ปฌ ์˜์กด์„ฑ ์ œ์™ธ
418
  filtered_deps = []
419
  for dep in analysis["dependencies"][:15]:
420
  if not dep.startswith("git+") and not dep.startswith("-e") and not dep.startswith("file:"):
421
- # ๋ฒ„์ „์ด ๋„ˆ๋ฌด ์—„๊ฒฉํ•œ ๊ฒฝ์šฐ ์™„ํ™”
422
  if "==" in dep and dep.split("==")[0].lower() not in ["torch", "tensorflow", "numpy"]:
423
  pkg_name = dep.split("==")[0]
424
  version = dep.split("==")[1]
@@ -427,21 +242,11 @@ def create_smart_template(repo_url: str, analysis: Dict) -> Dict:
427
  filtered_deps.append(dep)
428
  requirements.extend(filtered_deps)
429
 
430
- # ์•ฑ ํƒ€์ž…๋ณ„ ํ…œํ”Œ๋ฆฟ ์ƒ์„ฑ
431
  if has_3d or "gaussian" in repo_name.lower():
432
- # 3D/Gaussian Splatting ์•ฑ
433
  app_code = f'''import gradio as gr
434
  import os
435
- import sys
436
-
437
- # Repository: {repo_url}
438
- # {description}
439
-
440
- # Note: This project requires CUDA-enabled GPU and complex build dependencies
441
- # The original repository uses custom CUDA extensions that need compilation
442
 
443
  def process_3d(input_file):
444
- """3D processing function - placeholder for actual implementation"""
445
  if input_file is None:
446
  return "Please upload a 3D file or image"
447
 
@@ -451,21 +256,11 @@ def process_3d(input_file):
451
  This project requires:
452
  1. CUDA-enabled GPU
453
  2. Custom C++/CUDA extensions compilation
454
- 3. Specific versions of PyTorch with CUDA support
455
-
456
- The git dependencies in requirements.txt need PyTorch to be installed first.
457
-
458
- For full functionality:
459
- 1. Install PyTorch with CUDA: `pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118`
460
- 2. Install build tools: `apt-get install build-essential python3-dev ninja-build`
461
- 3. Then install other requirements
462
 
463
  Original repository: {repo_url}
464
  """
465
-
466
  return info
467
 
468
- # Gradio interface
469
  with gr.Blocks(title="{repo_name}") as demo:
470
  gr.Markdown(f"""
471
  # {repo_name.replace("-", " ").title()}
@@ -473,8 +268,6 @@ with gr.Blocks(title="{repo_name}") as demo:
473
  {description}
474
 
475
  This space was created from: [{repo_url}]({repo_url})
476
-
477
- **Note**: This project has complex build requirements. See below for details.
478
  """)
479
 
480
  with gr.Row():
@@ -499,25 +292,16 @@ if __name__ == "__main__":
499
  from PIL import Image
500
  import numpy as np
501
 
502
- # Repository: {repo_url}
503
- # {description}
504
-
505
  def process_image(image):
506
- """์ด๋ฏธ์ง€ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ - ์‹ค์ œ ๊ตฌํ˜„์œผ๋กœ ๊ต์ฒด ํ•„์š”"""
507
  if image is None:
508
  return None, "Please upload an image"
509
 
510
- # ์—ฌ๊ธฐ์— ์‹ค์ œ ์ด๋ฏธ์ง€ ์ฒ˜๋ฆฌ ๋กœ์ง ๊ตฌํ˜„
511
- # ์˜ˆ: ๋ชจ๋ธ ๋กœ๋“œ, ์ „์ฒ˜๋ฆฌ, ์ถ”๋ก , ํ›„์ฒ˜๋ฆฌ
512
-
513
- # ๋ฐ๋ชจ์šฉ ๊ฐ„๋‹จํ•œ ์ฒ˜๋ฆฌ
514
  img_array = np.array(image)
515
  processed = Image.fromarray(img_array)
516
 
517
- info = f"Image shape: {img_array.shape}"
518
  return processed, info
519
 
520
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ
521
  with gr.Blocks(title="{repo_name}") as demo:
522
  gr.Markdown(f"""
523
  # {repo_name.replace("-", " ").title()}
@@ -549,30 +333,22 @@ if __name__ == "__main__":
549
  elif has_nlp:
550
  app_code = f'''import gradio as gr
551
 
552
- # Repository: {repo_url}
553
- # {description}
554
-
555
  def process_text(text, max_length=100):
556
- """ํ…์ŠคํŠธ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ - ์‹ค์ œ ๊ตฌํ˜„์œผ๋กœ ๊ต์ฒด ํ•„์š”"""
557
  if not text:
558
  return "Please enter some text"
559
 
560
- # ์—ฌ๊ธฐ์— ์‹ค์ œ NLP ์ฒ˜๋ฆฌ ๋กœ์ง ๊ตฌํ˜„
561
-
562
- # ๋ฐ๋ชจ์šฉ ๊ฐ„๋‹จํ•œ ์ฒ˜๋ฆฌ
563
  word_count = len(text.split())
564
  char_count = len(text)
565
 
566
  result = f"""
567
  **Analysis Results:**
568
- - Word count: {word_count}
569
- - Character count: {char_count}
570
- - Average word length: {char_count/max(word_count, 1):.1f}
571
  """
572
 
573
  return result
574
 
575
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ
576
  with gr.Blocks(title="{repo_name}") as demo:
577
  gr.Markdown(f"""
578
  # {repo_name.replace("-", " ").title()}
@@ -613,20 +389,13 @@ if __name__ == "__main__":
613
  else:
614
  app_code = f'''import gradio as gr
615
 
616
- # Repository: {repo_url}
617
- # {description}
618
-
619
  def main_function(input_data):
620
- """๋ฉ”์ธ ์ฒ˜๋ฆฌ ํ•จ์ˆ˜ - ์‹ค์ œ ๊ตฌํ˜„์œผ๋กœ ๊ต์ฒด ํ•„์š”"""
621
  if not input_data:
622
  return "Please provide input"
623
 
624
- # ์—ฌ๊ธฐ์— ์‹ค์ œ ์ฒ˜๋ฆฌ ๋กœ์ง ๊ตฌํ˜„
625
-
626
- result = f"Processed successfully! Input received: {input_data}"
627
  return result
628
 
629
- # Gradio ์ธํ„ฐํŽ˜์ด์Šค ์ƒ์„ฑ
630
  with gr.Blocks(title="{repo_name}") as demo:
631
  gr.Markdown(f"""
632
  # {repo_name.replace("-", " ").title()}
@@ -664,22 +433,17 @@ if __name__ == "__main__":
664
  "summary": f"Smart template created for {repo_name}"
665
  }
666
 
667
- # ========== ํ†ตํ•ฉ๋œ ๋ฉ”์ธ clone ํ•จ์ˆ˜ ========== #
668
  def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
669
- """GitHub ๋ ˆํฌ์ง€ํ† ๋ฆฌ๋ฅผ HuggingFace Space๋กœ ๋ณต์ œํ•˜๊ณ  ์Šค๋งˆํŠธํ•˜๊ฒŒ app.py ์ƒ์„ฑ"""
670
  folder = str(uuid.uuid4())
671
 
672
- # ํ™˜๊ฒฝ๋ณ€์ˆ˜์—์„œ HF_TOKEN ๊ฐ€์ ธ์˜ค๊ธฐ
673
  hf_token = os.getenv("HF_TOKEN")
674
  if not hf_token:
675
- yield "โŒ Error: HF_TOKEN not found in environment variables. Please set it in the Space settings."
676
  return
677
 
678
  try:
679
- # Initialize progress messages
680
  yield "๐Ÿ”„ Starting clone process..."
681
 
682
- # Get user info
683
  api = HfApi(token=hf_token)
684
  try:
685
  user_info = api.whoami()
@@ -689,65 +453,47 @@ def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
689
  yield f"โŒ Authentication failed: {str(e)}"
690
  return
691
 
692
- # Clone the repository
693
  yield f"๐Ÿ“ฅ Cloning repository from {repo_git}..."
694
 
695
  env = os.environ.copy()
696
-
697
- # Always skip LFS download initially to avoid errors
698
  env['GIT_LFS_SKIP_SMUDGE'] = '1'
699
  clone_cmd = ['git', 'clone', '--recurse-submodules', repo_git, folder]
700
  subprocess.run(clone_cmd, check=True, env=env)
701
 
702
  if not skip_lfs:
703
- # Try to pull LFS files
704
  yield "๐Ÿ“ฆ Attempting to download LFS files..."
705
  try:
706
  subprocess.run(['git', 'lfs', 'install'], cwd=folder, check=True)
707
  lfs_result = subprocess.run(['git', 'lfs', 'pull'], cwd=folder, capture_output=True, text=True)
708
 
709
  if lfs_result.returncode != 0:
710
- yield f"โš ๏ธ Warning: LFS download failed: {lfs_result.stderr}"
711
- yield "โš ๏ธ Will remove LFS pointer files to prevent upload errors..."
712
- skip_lfs = True # Force LFS skip
713
  else:
714
  yield "โœ… LFS files downloaded successfully"
715
  except Exception as e:
716
  yield f"โš ๏ธ LFS error: {str(e)}"
717
- yield "โš ๏ธ Will remove LFS pointer files to prevent upload errors..."
718
- skip_lfs = True # Force LFS skip
719
 
720
- # If we're skipping LFS, remove all LFS pointer files
721
  if skip_lfs:
722
  yield "๐Ÿงน Removing LFS pointer files..."
723
  removed_files = remove_lfs_files(folder)
724
  if removed_files:
725
  yield f"๐Ÿ“ Removed {len(removed_files)} LFS pointer files"
726
- # Show first few removed files
727
- for file in removed_files[:5]:
728
- yield f" - {file}"
729
- if len(removed_files) > 5:
730
- yield f" ... and {len(removed_files) - 5} more files"
731
 
732
- # ์Šค๋งˆํŠธ ์ƒ์„ฑ์ด ํ™œ์„ฑํ™”๋œ ๊ฒฝ์šฐ
733
  if enable_smart_generation:
734
  yield "๐Ÿ” Analyzing repository structure..."
735
  folder_path = Path(folder)
736
  analysis = analyze_repository(folder_path)
737
 
738
- yield "๐Ÿ” Searching for additional information..."
739
- search_info = search_repo_info(repo_git)
740
-
741
  yield "๐Ÿค– Generating smart Gradio app..."
742
- generated = generate_gradio_app(repo_git, analysis, search_info)
743
 
744
  if generated and isinstance(generated, dict) and "app_py" in generated:
745
- # app.py ์ƒ์„ฑ/๋ฎ์–ด์“ฐ๊ธฐ
746
  app_path = folder_path / "app.py"
747
  app_path.write_text(generated["app_py"], encoding="utf-8")
748
  yield "โœ… Smart app.py generated"
749
 
750
- # requirements.txt ์—…๋ฐ์ดํŠธ - ์˜์กด์„ฑ ์ˆœ์„œ ์ตœ์ ํ™”
751
  req_path = folder_path / "requirements.txt"
752
  existing_reqs = []
753
  if req_path.exists():
@@ -758,7 +504,6 @@ def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
758
 
759
  new_reqs = generated["requirements_txt"].strip().split("\n") if generated["requirements_txt"] else []
760
 
761
- # ์˜์กด์„ฑ ์ •๋ฆฌ ๋ฐ ์ˆœ์„œ ์ตœ์ ํ™”
762
  all_reqs = set()
763
  git_reqs = []
764
  torch_reqs = []
@@ -769,42 +514,33 @@ def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
769
  if not req or req.startswith("#"):
770
  continue
771
 
772
- # git+ ์˜์กด์„ฑ์€ ๋”ฐ๋กœ ๊ด€๋ฆฌ
773
  if req.startswith("git+"):
774
  git_reqs.append(req)
775
- # torch ๊ด€๋ จ ์˜์กด์„ฑ์€ ๋จผ์ € ์„ค์น˜
776
  elif "torch" in req.lower() or "cuda" in req.lower():
777
  torch_reqs.append(req)
778
  else:
779
  regular_reqs.append(req)
780
 
781
- # gradio ๋ฒ„์ „ ํ™•์ธ ๋ฐ ์ถ”๊ฐ€
782
  has_gradio = any("gradio" in req for req in regular_reqs)
783
  if not has_gradio:
784
  regular_reqs.append("gradio>=5.35.0")
785
 
786
- # ์ตœ์ข… requirements.txt ์ƒ์„ฑ (์ˆœ์„œ ์ค‘์š”)
787
  final_reqs = []
788
 
789
- # 1. torch ๊ด€๋ จ ๋จผ์ €
790
  if torch_reqs:
791
  final_reqs.extend(sorted(set(torch_reqs)))
792
- final_reqs.append("") # ๋นˆ ์ค„
793
 
794
- # 2. ์ผ๋ฐ˜ ์˜์กด์„ฑ
795
  final_reqs.extend(sorted(set(regular_reqs)))
796
 
797
- # 3. git ์˜์กด์„ฑ์€ ๋งˆ์ง€๋ง‰์— (torch๊ฐ€ ํ•„์š”ํ•œ ๊ฒฝ์šฐ๊ฐ€ ๋งŽ์Œ)
798
  if git_reqs:
799
- final_reqs.append("") # ๋นˆ ์ค„
800
- final_reqs.append("# Git dependencies (installed last)")
801
  final_reqs.extend(sorted(set(git_reqs)))
802
 
803
  req_content = "\n".join(final_reqs)
804
  req_path.write_text(req_content, encoding="utf-8")
805
- yield "โœ… Requirements.txt updated with optimized dependency order"
806
 
807
- # README.md ์—…๋ฐ์ดํŠธ - ํ•ญ์ƒ ์ƒ์„ฑํ•˜์—ฌ ์˜ฌ๋ฐ”๋ฅธ ํ˜•์‹ ๋ณด์žฅ
808
  readme_path = folder_path / "README.md"
809
  readme_content = f"""---
810
  title: {repo_hf.replace("-", " ").title()}
@@ -822,329 +558,17 @@ pinned: false
822
  {analysis.get('description', 'Deployed from GitHub repository')}
823
 
824
  Deployed from: {repo_git}
825
-
826
- ## Features
827
- This Space provides a Gradio interface for the repository's main functionality.
828
- The app.py was automatically generated based on repository analysis.
829
-
830
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
831
  """
832
  readme_path.write_text(readme_content, encoding="utf-8")
833
  yield "โœ… README.md created/updated"
834
- else:
835
- # ์Šค๋งˆํŠธ ์ƒ์„ฑ์ด ๋น„ํ™œ์„ฑํ™”๋œ ๊ฒฝ์šฐ์—๋„ README.md ํ™•์ธ ๋ฐ ์ƒ์„ฑ
836
- readme_path = Path(folder) / "README.md"
837
- if not readme_path.exists():
838
- # ๊ธฐ๋ณธ README.md ์ƒ์„ฑ
839
- readme_content = f"""---
840
- title: {repo_hf.replace("-", " ").title()}
841
- emoji: ๐Ÿš€
842
- colorFrom: blue
843
- colorTo: green
844
- sdk: {sdk_type}
845
- sdk_version: "5.35.0"
846
- app_file: app.py
847
- pinned: false
848
- ---
849
-
850
- # {repo_hf.replace("-", " ").title()}
851
-
852
- Deployed from: {repo_git}
853
-
854
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
855
- """
856
- readme_path.write_text(readme_content, encoding="utf-8")
857
- yield "โœ… README.md created with required configuration"
858
-
859
- # requirements.txt ํ™•์ธ ๋ฐ ๋ฌธ์ œ ํ•ด๊ฒฐ
860
- req_path = Path(folder) / "requirements.txt"
861
- if req_path.exists():
862
- try:
863
- req_content = req_path.read_text(encoding="utf-8")
864
- lines = req_content.strip().split("\n")
865
-
866
- # ์˜์กด์„ฑ ๋ถ„๋ฅ˜ ๋ฐ ์ค‘๋ณต ์ œ๊ฑฐ
867
- torch_deps = []
868
- git_deps = []
869
- regular_deps = []
870
- problem_git_deps = [] # torch๊ฐ€ ํ•„์š”ํ•œ git ์˜์กด์„ฑ
871
- seen_packages = {} # ํŒจํ‚ค์ง€๋ช… -> ์ „์ฒด ์˜์กด์„ฑ ๋งคํ•‘
872
-
873
- for line in lines:
874
- line = line.strip()
875
- if not line or line.startswith("#"):
876
- continue
877
-
878
- if line.startswith("git+"):
879
- # git ์˜์กด์„ฑ ์ค‘ CUDA/์ปดํŒŒ์ผ์ด ํ•„์š”ํ•œ ๊ฒฝ์šฐ ํ™•์ธ
880
- cuda_keywords = ["gaussian", "rasterizer", "diff-", "cuda", "nvdiffrast", "tiny-cuda"]
881
- if any(keyword in line.lower() for keyword in cuda_keywords):
882
- problem_git_deps.append(line)
883
- else:
884
- git_deps.append(line)
885
- else:
886
- # ํŒจํ‚ค์ง€๋ช… ์ถ”์ถœ (๋ฒ„์ „ ์ง€์ •์ž ์ œ๊ฑฐ)
887
- pkg_name = line.split("==")[0].split(">=")[0].split("<=")[0].split(">")[0].split("<")[0].split("~=")[0].split("[")[0].strip()
888
-
889
- # ํŠน๋ณ„ํ•œ ์„ค์น˜๊ฐ€ ํ•„์š”ํ•œ ํŒจํ‚ค์ง€๋“ค
890
- special_install_packages = ["pytorch3d", "torch-scatter", "torch-sparse", "torch-geometric", "tiny-cuda-nn"]
891
-
892
- if pkg_name in special_install_packages:
893
- problem_git_deps.append(f"# {line} # Requires special installation")
894
- yield f" โ†’ Marked {pkg_name} for special handling"
895
- continue
896
-
897
- # ํŠน์ • ํŒจํ‚ค์ง€์˜ ์ž˜๋ชป๋œ ๋ฒ„์ „ ์ˆ˜์ •
898
- if pkg_name == "opencv-python":
899
- if "==4.10.0" in line or "==4.10" in line:
900
- line = "opencv-python>=4.10.0.82"
901
- yield "๐Ÿ“ Fixed opencv-python version (4.10.0 โ†’ 4.10.0.82)"
902
-
903
- # ์ค‘๋ณต ์ฒดํฌ
904
- if pkg_name in seen_packages:
905
- # ์ด๋ฏธ ์žˆ๋Š” ํŒจํ‚ค์ง€๋ฉด ๋ฒ„์ „ ๋น„๊ต
906
- existing = seen_packages[pkg_name]
907
- # ๋” ๊ตฌ์ฒด์ ์ธ ๋ฒ„์ „์„ ์„ ํƒ (== > >= > ๋ฒ„์ „ ์—†์Œ)
908
- if "==" in line and "==" not in existing:
909
- seen_packages[pkg_name] = line
910
- elif "==" not in existing and ">=" in line and ">=" not in existing:
911
- seen_packages[pkg_name] = line
912
- # ๊ฐ™์€ ์ˆ˜์ค€์ด๋ฉด ๋” ์ตœ์‹  ๋ฒ„์ „ ์„ ํƒ
913
- elif "==" in line and "==" in existing:
914
- try:
915
- new_ver = line.split("==")[1]
916
- old_ver = existing.split("==")[1]
917
- # ๋ฒ„์ „ ๋น„๊ต (๊ฐ„๋‹จํ•œ ๋ฌธ์ž์—ด ๋น„๊ต)
918
- if new_ver > old_ver:
919
- seen_packages[pkg_name] = line
920
- except:
921
- pass
922
- yield f" โ†’ Resolved duplicate: {pkg_name} - using {seen_packages[pkg_name]}"
923
- else:
924
- seen_packages[pkg_name] = line
925
-
926
- # ๋ถ„๋ฅ˜๋œ ์˜์กด์„ฑ์œผ๋กœ ์žฌ๊ตฌ์„ฑ
927
- for pkg_name, dep_line in seen_packages.items():
928
- if any(t in pkg_name.lower() for t in ["torch==", "torch>=", "torch~=", "torch<", "torch>", "torch[", "torchvision", "torchaudio"]):
929
- torch_deps.append(dep_line)
930
- else:
931
- regular_deps.append(dep_line)
932
-
933
- # gradio ๋ฒ„์ „ ํ™•์ธ
934
- has_gradio = any("gradio" in pkg for pkg in seen_packages.keys())
935
- if not has_gradio:
936
- regular_deps.append("gradio>=5.35.0")
937
- seen_packages["gradio"] = "gradio>=5.35.0"
938
-
939
- # torch๊ฐ€ ์—†์œผ๋ฉด ์ถ”๊ฐ€ (CUDA ์˜์กด์„ฑ์ด ์žˆ๋Š” ๊ฒฝ์šฐ)
940
- torch_packages = [p for p in seen_packages.keys() if p == "torch"]
941
- if not torch_packages and (problem_git_deps or any("torch" in dep for dep in git_deps)):
942
- torch_deps.append("torch>=2.0.0")
943
- yield "โš ๏ธ Added torch dependency for git packages"
944
-
945
- # CPU ๋ฒ„์ „ torch๋กœ ๋Œ€์ฒด ์ œ์•ˆ
946
- cpu_torch_suggested = False
947
- for i, dep in enumerate(torch_deps):
948
- if "torch==" in dep or "torch>=" in dep:
949
- # CUDA ๋ฒ„์ „์ด ๋ช…์‹œ๋˜์–ด ์žˆ์œผ๋ฉด CPU ๋ฒ„์ „ ์ œ์•ˆ
950
- if "+cu" in dep:
951
- torch_deps[i] = dep.split("+cu")[0]
952
- cpu_torch_suggested = True
953
-
954
- if cpu_torch_suggested:
955
- yield "โ„น๏ธ Converted torch to CPU version for HuggingFace Spaces compatibility"
956
-
957
- # ์žฌ์ •๋ ฌ๋œ requirements.txt ์ž‘์„ฑ
958
- new_lines = []
959
-
960
- # 1. ๋จผ์ € torch ์„ค์น˜
961
- if torch_deps:
962
- new_lines.append("# PyTorch - Must be installed first")
963
- new_lines.extend(sorted(set(torch_deps)))
964
- new_lines.append("")
965
-
966
- # 2. ์ผ๋ฐ˜ ์˜์กด์„ฑ
967
- if regular_deps:
968
- # ํŠน๋ณ„ํ•œ ์„ค์น˜๊ฐ€ ํ•„์š”ํ•œ ํŒจํ‚ค์ง€๋“ค
969
- special_packages = {
970
- "pytorch3d": "# pytorch3d requires special installation from https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md",
971
- "torch-scatter": "# torch-scatter requires matching torch version",
972
- "torch-sparse": "# torch-sparse requires matching torch version",
973
- "torch-geometric": "# torch-geometric requires special installation"
974
- }
975
-
976
- # ์ถ”๊ฐ€ ๋ฒ„์ „ ๊ฒ€์ฆ ๋ฐ ์ˆ˜์ •
977
- validated_deps = []
978
- problematic_versions = {
979
- "opencv-python": {
980
- "4.10.0": "4.10.0.84",
981
- "4.10": "4.10.0.84",
982
- "4.9.0": "4.9.0.80",
983
- "4.8.0": "4.8.0.76"
984
- },
985
- "pillow": {
986
- "10.0": "10.0.0",
987
- "9.5": "9.5.0"
988
- }
989
- }
990
-
991
- skipped_packages = []
992
-
993
- for dep in regular_deps:
994
- pkg_name = dep.split("==")[0].split(">=")[0].split("[")[0].strip()
995
-
996
- # ํŠน๋ณ„ํ•œ ์„ค์น˜๊ฐ€ ํ•„์š”ํ•œ ํŒจํ‚ค์ง€๋Š” ์ฃผ์„ ์ฒ˜๋ฆฌ
997
- if pkg_name in special_packages:
998
- skipped_packages.append(f"# {dep} {special_packages[pkg_name]}")
999
- yield f" โ†’ Commented out {pkg_name} (requires special installation)"
1000
- continue
1001
-
1002
- # ๋ฒ„์ „ ์ˆ˜์ •์ด ํ•„์š”ํ•œ ํŒจํ‚ค์ง€ ์ฒ˜๋ฆฌ
1003
- if pkg_name in problematic_versions and "==" in dep:
1004
- version = dep.split("==")[1].strip()
1005
- if version in problematic_versions[pkg_name]:
1006
- new_version = problematic_versions[pkg_name][version]
1007
- new_dep = f"{pkg_name}>={new_version}"
1008
- validated_deps.append(new_dep)
1009
- yield f" โ†’ Fixed version: {dep} โ†’ {new_dep}"
1010
- else:
1011
- validated_deps.append(dep)
1012
- else:
1013
- validated_deps.append(dep)
1014
-
1015
- new_lines.append("# Core dependencies")
1016
- # opencv-python ์ค‘๋ณต ์ œ๊ฑฐ ํ™•์ธ
1017
- deduped_regular = []
1018
- seen = set()
1019
- for dep in sorted(validated_deps):
1020
- pkg_name = dep.split("==")[0].split(">=")[0].split("<=")[0].split(">")[0].split("<")[0].split("~=")[0].split("[")[0].strip()
1021
- if pkg_name not in seen:
1022
- deduped_regular.append(dep)
1023
- seen.add(pkg_name)
1024
- new_lines.extend(deduped_regular)
1025
- new_lines.append("")
1026
-
1027
- # ํŠน๋ณ„ํ•œ ์„ค์น˜๊ฐ€ ํ•„์š”ํ•œ ํŒจํ‚ค์ง€๋“ค์„ ์ฃผ์„์œผ๋กœ ์ถ”๊ฐ€
1028
- if skipped_packages:
1029
- new_lines.append("# โš ๏ธ The following packages require special installation:")
1030
- new_lines.extend(skipped_packages)
1031
- new_lines.append("")
1032
-
1033
- # 3. ์ผ๋ฐ˜ git ์˜์กด์„ฑ
1034
- if git_deps:
1035
- new_lines.append("# Git dependencies")
1036
- new_lines.extend(sorted(set(git_deps)))
1037
- new_lines.append("")
1038
-
1039
- # ๋ฌธ์ œ๊ฐ€ ๋˜๋Š” git ์˜์กด์„ฑ๊ณผ ํŠน์ˆ˜ ํŒจํ‚ค์ง€๋Š” ์ฃผ์„ ์ฒ˜๋ฆฌ
1040
- if problem_git_deps:
1041
- new_lines.append("")
1042
- new_lines.append("# โš ๏ธ CUDA-dependent packages and special installations")
1043
- new_lines.append("# These packages require special installation methods:")
1044
- new_lines.append("# - pytorch3d: Install from https://github.com/facebookresearch/pytorch3d/blob/main/INSTALL.md")
1045
- new_lines.append("# - CUDA packages: Require CUDA toolkit and GPU environment")
1046
- new_lines.append("#")
1047
- for dep in problem_git_deps:
1048
- if not dep.startswith("#"):
1049
- new_lines.append(f"# {dep}")
1050
- else:
1051
- new_lines.append(dep)
1052
-
1053
- # ๊ฒฝ๊ณ  ๋ฉ”์‹œ์ง€ ์ถœ๋ ฅ
1054
- yield f"โš ๏ธ Commented out {len(problem_git_deps)} packages requiring special installation"
1055
-
1056
- # ๋นˆ ์ค„ ์ œ๊ฑฐํ•˜๊ณ  ์ •๋ฆฌ
1057
- final_lines = []
1058
- for i, line in enumerate(new_lines):
1059
- # ์ฃผ์„ ๋‹ค์Œ์— ๋ฐ”๋กœ ๋‚ด์šฉ์ด ์žˆ๋Š”์ง€ ํ™•์ธ
1060
- if line.strip() and not (i > 0 and new_lines[i-1].startswith("#") and line == ""):
1061
- final_lines.append(line)
1062
- elif line == "" and i < len(new_lines) - 1: # ์ค‘๊ฐ„์˜ ๋นˆ ์ค„์€ ์œ ์ง€
1063
- final_lines.append(line)
1064
-
1065
- req_path.write_text("\n".join(final_lines), encoding="utf-8")
1066
-
1067
- # ์˜์กด์„ฑ ๊ฐœ์ˆ˜ ํ†ต๊ณ„
1068
- total_deps = len(torch_deps) + len(regular_deps) + len(git_deps) + len(problem_git_deps)
1069
- yield f"โœ… Reorganized requirements.txt - Total {total_deps} dependencies (duplicates removed)"
1070
- if torch_deps:
1071
- yield f" - PyTorch packages: {len(torch_deps)}"
1072
- if regular_deps:
1073
- yield f" - Regular packages: {len(set(regular_deps))}"
1074
- if git_deps or problem_git_deps:
1075
- yield f" - Git dependencies: {len(git_deps + problem_git_deps)} ({len(problem_git_deps)} commented)"
1076
-
1077
- # pre-requirements.txt๋Š” ๋” ์ด์ƒ ํ•„์š”ํ•˜์ง€ ์•Š์Œ (ํ†ตํ•ฉ๋œ requirements.txt ์‚ฌ์šฉ)
1078
- # packages.txt๋„ HF Spaces ๊ธฐ๋ณธ ํ™˜๊ฒฝ์—์„œ๋Š” ๋ถˆํ•„์š”
1079
-
1080
- # README.md์— ๋กœ์ปฌ ์‹คํ–‰ ๊ฐ€์ด๋“œ ์ถ”๊ฐ€
1081
- if problem_git_deps:
1082
- readme_path = Path(folder) / "README.md"
1083
- if readme_path.exists():
1084
- try:
1085
- existing_readme = readme_path.read_text(encoding="utf-8")
1086
-
1087
- # YAML ํ—ค๋” ์ดํ›„์— ๋กœ์ปฌ ์‹คํ–‰ ๊ฐ€์ด๋“œ ์ถ”๊ฐ€
1088
- if "---" in existing_readme:
1089
- parts = existing_readme.split("---", 2)
1090
- if len(parts) >= 3:
1091
- yaml_header = parts[1]
1092
- content = parts[2]
1093
-
1094
- # repo_id๊ฐ€ ์ •์˜๋˜์ง€ ์•Š์•˜์œผ๋ฏ€๋กœ repo_hf์™€ username ์‚ฌ์šฉ
1095
- repo_id = f"{username}/{slugify(repo_hf)}"
1096
-
1097
- local_guide = f"""
1098
- ## โš ๏ธ GPU/CUDA Requirements
1099
-
1100
- This project contains CUDA-dependent packages that cannot run on standard HuggingFace Spaces (CPU environment).
1101
-
1102
- ### Running Locally with GPU
1103
-
1104
- ```bash
1105
- # Install CUDA Toolkit (if not installed)
1106
- # Visit: https://developer.nvidia.com/cuda-downloads
1107
-
1108
- # Clone this Space
1109
- git clone https://huggingface.co/spaces/{repo_id}
1110
- cd {repo_id.split('/')[-1]}
1111
-
1112
- # Install PyTorch with CUDA
1113
- pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118
1114
-
1115
- # Uncomment CUDA dependencies in requirements.txt
1116
- # Then install all requirements
1117
- pip install -r requirements.txt
1118
- ```
1119
-
1120
- ### Enabling GPU on HuggingFace Spaces
1121
-
1122
- To use GPU on this Space:
1123
- 1. Go to Settings โ†’ Hardware
1124
- 2. Select GPU (T4 or A10G)
1125
- 3. Costs apply for GPU usage
1126
-
1127
- ---
1128
-
1129
- """
1130
- new_readme = f"---{yaml_header}---\n{local_guide}{content}"
1131
- readme_path.write_text(new_readme, encoding="utf-8")
1132
- yield "๐Ÿ“ Added GPU setup guide to README.md"
1133
- except Exception as e:
1134
- yield f"โš ๏ธ Could not update README with GPU guide: {str(e)}"
1135
- except Exception as e:
1136
- yield f"โš ๏ธ Error processing requirements.txt: {str(e)}"
1137
 
1138
- # Remove .git directory to save space and avoid issues
1139
  git_dir = os.path.join(folder, '.git')
1140
  if os.path.exists(git_dir):
1141
  shutil.rmtree(git_dir)
1142
  yield "๐Ÿงน Removed .git directory"
1143
 
1144
- # Also clean up .gitattributes to remove LFS tracking
1145
  gitattributes_path = os.path.join(folder, '.gitattributes')
1146
  if os.path.exists(gitattributes_path):
1147
- yield "๐Ÿงน Cleaning .gitattributes file..."
1148
  with open(gitattributes_path, 'r') as f:
1149
  lines = f.readlines()
1150
 
@@ -1157,56 +581,25 @@ To use GPU on this Space:
1157
  with open(gitattributes_path, 'w') as f:
1158
  f.writelines(new_lines)
1159
  else:
1160
- # Remove empty .gitattributes
1161
  os.remove(gitattributes_path)
1162
 
1163
- # ๊ธฐ์กด README๊ฐ€ ์žˆ๋Š”์ง€ ํ™•์ธํ•˜๊ณ  Space ํ—ค๋”๊ฐ€ ์—†์œผ๋ฉด ์ถ”๊ฐ€
1164
- readme_path = Path(folder) / "README.md"
1165
- if readme_path.exists():
1166
- try:
1167
- existing_content = readme_path.read_text(encoding="utf-8")
1168
- # YAML ํ—ค๋”๊ฐ€ ์—†์œผ๋ฉด ์ถ”๊ฐ€
1169
- if not existing_content.strip().startswith("---"):
1170
- yaml_header = f"""---
1171
- title: {repo_hf.replace("-", " ").title()}
1172
- emoji: ๐Ÿš€
1173
- colorFrom: blue
1174
- colorTo: green
1175
- sdk: {sdk_type}
1176
- sdk_version: "5.35.0"
1177
- app_file: app.py
1178
- pinned: false
1179
- ---
1180
-
1181
- """
1182
- new_content = yaml_header + existing_content
1183
- readme_path.write_text(new_content, encoding="utf-8")
1184
- yield "โœ… Updated README.md with Space configuration"
1185
- except Exception as e:
1186
- yield f"โš ๏ธ Could not update README.md: {str(e)}"
1187
-
1188
- # Create the HuggingFace repo with retries
1189
  yield "๐Ÿ—๏ธ Creating Hugging Face Space..."
1190
 
1191
  repo_id = f"{username}/{slugify(repo_hf)}"
1192
  space_created = False
1193
 
1194
- # Space ์ƒ์„ฑ ์‹œ๋„
1195
  for attempt in range(3):
1196
  try:
1197
  yield f" Creating Space: {repo_id} (attempt {attempt + 1}/3)"
1198
 
1199
- # ๋จผ์ € ๊ธฐ์กด Space๊ฐ€ ์žˆ๋Š”์ง€ ํ™•์ธ
1200
  try:
1201
  existing_space = api.space_info(repo_id=repo_id, token=hf_token)
1202
  yield f" โ„น๏ธ Space already exists: {existing_space.id}"
1203
  space_created = True
1204
  break
1205
  except:
1206
- # Space๊ฐ€ ์—†์œผ๋ฉด ์ƒ์„ฑ
1207
  pass
1208
 
1209
- # Space ์ƒ์„ฑ
1210
  create_result = api.create_repo(
1211
  repo_id=repo_id,
1212
  repo_type="space",
@@ -1216,11 +609,8 @@ pinned: false
1216
  token=hf_token
1217
  )
1218
 
1219
- # ์ƒ์„ฑ ํ›„ ์ž ์‹œ ๋Œ€๊ธฐ
1220
- import time
1221
  time.sleep(3)
1222
 
1223
- # ์ƒ์„ฑ ํ™•์ธ
1224
  space_info = api.space_info(repo_id=repo_id, token=hf_token)
1225
  yield f" โœ… Space created successfully: {space_info.id}"
1226
  space_created = True
@@ -1229,39 +619,13 @@ pinned: false
1229
  except Exception as e:
1230
  error_msg = str(e)
1231
 
1232
- # Rate limit ์—๋Ÿฌ ์ฒ˜๋ฆฌ
1233
  if "429" in error_msg or "Too Many Requests" in error_msg:
1234
- yield f"""
1235
- โŒ **Rate Limit Error**
1236
-
1237
- You have reached the HuggingFace API rate limit for creating Spaces.
1238
-
1239
- **What this means:**
1240
- - New users have limited Space creation quotas
1241
- - You need to wait before creating more Spaces (usually 17-24 hours)
1242
- - Your limits will increase over time as you use HuggingFace
1243
-
1244
- **Solutions:**
1245
- 1. **Wait**: Try again in 17-24 hours
1246
- 2. **Use existing Space**: Update an existing Space instead of creating a new one
1247
- 3. **Contact HuggingFace**: Email website@huggingface.co if you need immediate access
1248
- 4. **Alternative**: Create the Space manually on HuggingFace and upload the files
1249
-
1250
- **Manual Space Creation Steps:**
1251
- 1. Go to https://huggingface.co/new-space
1252
- 2. Create a Space named: `{repo_hf}`
1253
- 3. Select SDK: {sdk_type}
1254
- 4. After creation, use the "Files" tab to upload your repository contents
1255
-
1256
- Repository has been cloned to local folder and is ready for manual upload.
1257
- """
1258
- # Rate limit์˜ ๊ฒฝ์šฐ ์žฌ์‹œ๋„ํ•˜์ง€ ์•Š์Œ
1259
- raise Exception(f"Rate limit reached. Please try again later or create the Space manually.")
1260
 
1261
  yield f" โš ๏ธ Attempt {attempt + 1} failed: {error_msg[:100]}..."
1262
  if attempt < 2:
1263
  yield " Retrying in 5 seconds..."
1264
- import time
1265
  time.sleep(5)
1266
  else:
1267
  yield f" โŒ Failed to create space after 3 attempts"
@@ -1270,18 +634,15 @@ Repository has been cloned to local folder and is ready for manual upload.
1270
  if not space_created:
1271
  raise Exception("Failed to create space")
1272
 
1273
- # Check folder size
1274
  folder_size = sum(os.path.getsize(os.path.join(dirpath, filename))
1275
  for dirpath, dirnames, filenames in os.walk(folder)
1276
- for filename in filenames) / (1024 * 1024) # Size in MB
1277
 
1278
  yield f"๐Ÿ“Š Folder size: {folder_size:.2f} MB"
1279
 
1280
- # Count remaining files
1281
  file_count = sum(len(files) for _, _, files in os.walk(folder))
1282
  yield f"๐Ÿ“ Total files to upload: {file_count}"
1283
 
1284
- # Upload to HuggingFace with retry logic
1285
  upload_success = False
1286
  max_retries = 3
1287
 
@@ -1289,11 +650,10 @@ Repository has been cloned to local folder and is ready for manual upload.
1289
  try:
1290
  if attempt > 0:
1291
  yield f"๐Ÿ“ค Upload attempt {attempt + 1}/{max_retries}..."
1292
- import time
1293
- time.sleep(5) # ์žฌ์‹œ๋„ ์ „ ๋Œ€๊ธฐ
1294
 
1295
- if folder_size > 500: # If larger than 500MB, use upload_large_folder
1296
- yield "๐Ÿ“ค Uploading large folder to Hugging Face (this may take several minutes)..."
1297
  api.upload_large_folder(
1298
  folder_path=folder,
1299
  repo_id=repo_id,
@@ -1321,17 +681,13 @@ Repository has been cloned to local folder and is ready for manual upload.
1321
  error_msg = str(upload_error)
1322
 
1323
  if "404" in error_msg and attempt < max_retries - 1:
1324
- yield f" โš ๏ธ Upload failed (404). Space might not be ready yet."
1325
- yield " Waiting 10 seconds before retry..."
1326
- import time
1327
  time.sleep(10)
1328
 
1329
- # Space ๋‹ค์‹œ ํ™•์ธ
1330
  try:
1331
  space_info = api.space_info(repo_id=repo_id, token=hf_token)
1332
  yield f" โœ… Space confirmed to exist"
1333
  except:
1334
- # Space ์žฌ์ƒ์„ฑ ์‹œ๋„
1335
  yield " ๐Ÿ”„ Attempting to recreate space..."
1336
  try:
1337
  api.create_repo(
@@ -1348,23 +704,10 @@ Repository has been cloned to local folder and is ready for manual upload.
1348
 
1349
  elif "LFS pointer" in error_msg:
1350
  yield "โŒ Upload failed due to remaining LFS pointer files"
1351
- yield "๐Ÿ” Searching for remaining LFS pointers..."
1352
-
1353
- # Do another scan for LFS files
1354
- lfs_count = 0
1355
- for root, dirs, files in os.walk(folder):
1356
- for file in files:
1357
- filepath = os.path.join(root, file)
1358
- if is_lfs_pointer_file(filepath):
1359
- lfs_count += 1
1360
- if lfs_count <= 5: # ์ฒ˜์Œ 5๊ฐœ๋งŒ ํ‘œ์‹œ
1361
- yield f" Found LFS pointer: {filepath.replace(folder + os.sep, '')}"
1362
- if lfs_count > 5:
1363
- yield f" ... and {lfs_count - 5} more LFS pointer files"
1364
  raise upload_error
1365
 
1366
  elif attempt == max_retries - 1:
1367
- yield f"โŒ Upload failed after {max_retries} attempts: {error_msg[:200]}..."
1368
  raise upload_error
1369
  else:
1370
  yield f" โš ๏ธ Upload failed: {error_msg[:100]}..."
@@ -1372,45 +715,28 @@ Repository has been cloned to local folder and is ready for manual upload.
1372
  if not upload_success:
1373
  raise Exception("Upload failed after all retries")
1374
 
1375
- # Clean up the temporary folder
1376
  shutil.rmtree(folder)
1377
 
1378
  space_url = f"https://huggingface.co/spaces/{repo_id}"
1379
 
1380
- # ์„ฑ๊ณต ๋ฉ”์‹œ์ง€์™€ ์ƒ์„ธ ์ •๋ณด ์ถœ๋ ฅ
1381
  yield f"""
1382
  โœ… **Successfully created Space!**
1383
 
1384
  ๐Ÿ”— **Your Space URL**: {space_url}
1385
 
1386
- ๐Ÿ“‹ **Deployment Summary:**
1387
- - **Space ID**: `{repo_id}`
1388
- - **Source Repository**: {repo_git}
1389
- - **SDK Type**: {sdk_type}
1390
- - **Smart Generation**: {'Enabled' if enable_smart_generation else 'Disabled'}
1391
- - **LFS Files**: {'Skipped' if skip_lfs else 'Included'}
1392
-
1393
- ๐Ÿš€ **Next Steps:**
1394
- 1. Click the link above to visit your Space
1395
- 2. Wait 2-3 minutes for the initial build to complete
1396
- 3. Check the "Logs" tab if you encounter any issues
1397
- 4. The Space will automatically rebuild when you make changes
1398
-
1399
- ๐Ÿ’ก **Tips:**
1400
- - If the build fails, check the requirements.txt file
1401
- - For GPU-required projects, enable GPU in Space Settings
1402
- - You can edit files directly in the Space's Files tab
1403
  """
1404
 
1405
  if skip_lfs:
1406
- yield "\nโš ๏ธ **Note**: LFS files were removed. The Space may be missing some large files (videos, models, etc.)"
1407
 
1408
  if enable_smart_generation:
1409
- yield "\n๐Ÿค– **Smart Generation**: An AI-generated Gradio interface was created based on repository analysis"
1410
-
1411
- # ์ถ”๊ฐ€ ์•ˆ๋‚ด์‚ฌํ•ญ
1412
- if any(dep.startswith("git+") for dep in analysis.get("dependencies", [])) if enable_smart_generation else False:
1413
- yield "\nโš ๏ธ **Build Notice**: This repository contains git dependencies that may take longer to build"
1414
 
1415
  except subprocess.CalledProcessError as e:
1416
  if os.path.exists(folder):
@@ -1421,7 +747,6 @@ Repository has been cloned to local folder and is ready for manual upload.
1421
  shutil.rmtree(folder)
1422
  yield f"โŒ Error: {str(e)}"
1423
 
1424
- # Custom CSS for better styling
1425
  css = """
1426
  .container {
1427
  max-width: 900px;
@@ -1436,133 +761,41 @@ css = """
1436
  font-size: 14px;
1437
  line-height: 1.5;
1438
  }
1439
- .warning-box {
1440
- background-color: #fff3cd;
1441
- border: 1px solid #ffeaa7;
1442
- border-radius: 4px;
1443
- padding: 12px;
1444
- margin: 10px 0;
1445
- }
1446
- .error-box {
1447
- background-color: #f8d7da;
1448
- border: 1px solid #f5c6cb;
1449
- border-radius: 4px;
1450
- padding: 12px;
1451
- margin: 10px 0;
1452
- }
1453
- .info-box {
1454
- background-color: #d1ecf1;
1455
- border: 1px solid #bee5eb;
1456
- border-radius: 4px;
1457
- padding: 12px;
1458
- margin: 10px 0;
1459
- }
1460
  """
1461
 
1462
  with gr.Blocks(css=css) as demo:
1463
- gr.Markdown("# ๐Ÿš€ Smart GitHub to Hugging Face Space Cloner")
1464
- gr.Markdown("""
1465
- Clone any public GitHub repository and convert it to a Hugging Face Space!
1466
 
1467
- **Features:**
1468
- - โœ… Automatic handling of Git LFS issues
1469
- - โœ… Removes problematic LFS pointer files
1470
- - โœ… Progress updates during cloning
1471
- - โœ… Support for large repositories
1472
- - ๐Ÿค– **NEW: Smart app.py generation with AI analysis**
1473
- """)
1474
-
1475
- # Check for HF_TOKEN
1476
  if not os.getenv("HF_TOKEN"):
1477
- gr.Markdown("""
1478
- <div class="error-box">
1479
- <strong>โŒ HF_TOKEN Required</strong><br>
1480
- Please set the HF_TOKEN environment variable in your Space settings:
1481
- <ol>
1482
- <li>Go to your Space Settings</li>
1483
- <li>Navigate to "Variables and secrets"</li>
1484
- <li>Add a new secret: Name = <code>HF_TOKEN</code>, Value = your Hugging Face write token</li>
1485
- <li>Get a token from: <a href="https://huggingface.co/settings/tokens" target="_blank">https://huggingface.co/settings/tokens</a></li>
1486
- </ol>
1487
- </div>
1488
- """)
1489
  else:
1490
- gr.Markdown("""
1491
- <div class="info-box">
1492
- <strong>โœ… HF_TOKEN Found</strong><br>
1493
- Ready to clone repositories to your Hugging Face account.
1494
- </div>
1495
- """)
1496
-
1497
- # Rate limit ๊ฒฝ๊ณ  ์ถ”๊ฐ€
1498
- gr.Markdown("""
1499
- <div class="warning-box">
1500
- <strong>โš ๏ธ Rate Limits for New Users</strong><br>
1501
- New HuggingFace users have limited Space creation quotas:
1502
- <ul>
1503
- <li>You can create only a few Spaces per day initially</li>
1504
- <li>Limits increase over time with account activity</li>
1505
- <li>If you hit the limit, wait 17-24 hours or update existing Spaces</li>
1506
- <li>Contact website@huggingface.co for immediate access needs</li>
1507
- </ul>
1508
- </div>
1509
- """)
1510
 
1511
  with gr.Row():
1512
  with gr.Column():
1513
  repo_git = gr.Textbox(
1514
  label="GitHub Repository URL",
1515
- placeholder="https://github.com/username/repository",
1516
- info="Enter the full URL of the GitHub repository"
1517
  )
1518
  repo_hf = gr.Textbox(
1519
  label="Hugging Face Space Name",
1520
- placeholder="my-awesome-space",
1521
- info="Choose a name for your new Space (will be slugified)"
1522
  )
1523
  sdk_choices = gr.Radio(
1524
  ["gradio", "streamlit", "docker", "static"],
1525
  label="Space SDK",
1526
- value="gradio",
1527
- info="Select the SDK type for your Space"
1528
  )
1529
  skip_lfs = gr.Checkbox(
1530
  label="Skip Git LFS files",
1531
- value=True, # Default to True due to common LFS issues
1532
- info="Recommended if the repo has large files (videos, models, datasets)"
1533
  )
1534
  enable_smart_generation = gr.Checkbox(
1535
- label="๐Ÿค– Enable Smart app.py Generation (Beta)",
1536
  value=False,
1537
- info="Analyze repository and generate working Gradio interface with AI"
1538
  )
1539
 
1540
- gr.Markdown("""
1541
- <div class="warning-box">
1542
- <strong>โš ๏ธ About Git LFS</strong><br>
1543
- Many repos use Git LFS for large files. If these files are missing or causing errors,
1544
- keeping "Skip Git LFS files" checked will remove them and allow successful cloning.
1545
- </div>
1546
- """)
1547
-
1548
- # Smart Generation ์ •๋ณด
1549
- gr.Markdown("""
1550
- <div class="info-box">
1551
- <strong>๐Ÿค– About Smart Generation</strong><br>
1552
- When enabled, the system will:
1553
- <ul>
1554
- <li>Analyze repository structure and dependencies</li>
1555
- <li>Search for usage examples and documentation</li>
1556
- <li>Generate a working Gradio interface using AI</li>
1557
- <li>Create appropriate requirements.txt</li>
1558
- </ul>
1559
- <br>
1560
- <strong>Required Environment Variables:</strong><br>
1561
- - <code>OPENAI_API_KEY</code> or <code>FRIENDLI_TOKEN</code> for AI generation<br>
1562
- - <code>BAPI_TOKEN</code> for web search (optional)
1563
- </div>
1564
- """)
1565
-
1566
  btn = gr.Button("๐ŸŽฏ Clone Repository", variant="primary")
1567
 
1568
  with gr.Column():
@@ -1574,52 +807,11 @@ with gr.Blocks(css=css) as demo:
1574
  show_copy_button=True
1575
  )
1576
 
1577
- gr.Markdown("""
1578
- ### ๐Ÿ“ Instructions:
1579
- 1. **Setup**: Make sure HF_TOKEN is set in your Space settings
1580
- 2. **Repository URL**: Enter the full GitHub repository URL
1581
- 3. **Space Name**: Choose a name for your new Space
1582
- 4. **SDK**: Select the appropriate SDK for your Space
1583
- 5. **LFS Files**: Keep "Skip Git LFS files" checked if unsure
1584
- 6. **Smart Generation**: Enable to automatically create working app.py
1585
- 7. **Clone**: Click "Clone Repository" and monitor progress
1586
-
1587
- ### ๐Ÿšจ Troubleshooting:
1588
-
1589
- <div class="error-box">
1590
- <strong>LFS pointer file errors?</strong><br>
1591
- Make sure "Skip Git LFS files" is checked. This removes large file pointers that can cause upload failures.
1592
- </div>
1593
-
1594
- - **Missing files after cloning**: The repository used Git LFS for large files that are no longer available
1595
- - **Slow uploads**: Large repositories take time. Consider using a smaller repository or removing unnecessary files
1596
- - **Space doesn't work**: Check if removed LFS files were essential (models, data, etc.) and add them manually
1597
- - **Smart Generation issues**: Make sure you have the required API keys set in environment variables
1598
- """)
1599
-
1600
  btn.click(
1601
  fn=clone,
1602
  inputs=[repo_git, repo_hf, sdk_choices, skip_lfs, enable_smart_generation],
1603
  outputs=output
1604
  )
1605
-
1606
- # ์„ฑ๊ณต ์‚ฌ๋ก€ ๋ฐ ํŒ
1607
- gr.Markdown("""
1608
- ### ๐ŸŒŸ Success Tips:
1609
-
1610
- 1. **For ML/AI Projects**: Enable GPU in Space Settings after deployment
1611
- 2. **For Large Files**: Use Git LFS or host models on HuggingFace Hub
1612
- 3. **For Complex Dependencies**: Check build logs and adjust requirements.txt
1613
- 4. **For Private APIs**: Add secrets in Space Settings (Settings โ†’ Variables and secrets)
1614
-
1615
- ### ๐Ÿ“Š Supported Project Types:
1616
- - ๐Ÿค– Machine Learning models (PyTorch, TensorFlow, Transformers)
1617
- - ๐Ÿ–ผ๏ธ Computer Vision applications
1618
- - ๐Ÿ“ NLP and text processing
1619
- - ๐ŸŽต Audio processing and generation
1620
- - ๐Ÿ“ˆ Data visualization and analysis
1621
- - ๐ŸŽฎ Interactive demos and games
1622
- """)
1623
 
1624
  if __name__ == "__main__":
1625
  demo.launch()
 
1
  import gradio as gr
2
+ from huggingface_hub import HfApi
 
3
  import uuid
4
  from slugify import slugify
5
  import os
 
6
  import json
 
7
  import subprocess
8
  import tempfile
 
9
  import requests
10
  import shutil
11
  import time
12
  from pathlib import Path
13
+ from typing import Optional, Dict, List
14
 
 
15
  def is_lfs_pointer_file(filepath):
 
 
 
 
16
  try:
17
  with open(filepath, 'rb') as f:
18
  header = f.read(100)
 
21
  return False
22
 
23
def remove_lfs_files(folder):
    """Delete every Git LFS pointer file found under *folder*.

    Walks the tree rooted at *folder*, skipping any directory whose path
    contains '.git', and removes each file that is an LFS pointer.
    Returns the list of removed files as paths relative to *folder*
    (the leading "folder + os.sep" prefix is stripped).
    """
    deleted = []
    prefix = folder + os.sep
    for dirpath, _dirnames, filenames in os.walk(folder):
        # Never touch anything inside the repository's .git metadata tree.
        if '.git' in dirpath:
            continue
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            if not is_lfs_pointer_file(full_path):
                continue
            os.remove(full_path)
            deleted.append(full_path.replace(prefix, ''))
    return deleted
34
 
 
35
  def analyze_repository(src_path: Path) -> Dict:
 
36
  analysis = {
37
  "has_requirements": False,
38
  "has_readme": False,
 
39
  "main_language": "python",
40
  "key_files": [],
41
  "dependencies": [],
42
  "description": "",
43
+ "entry_points": [],
 
44
  "model_files": [],
45
+ "config_files": []
 
 
46
  }
47
 
 
48
  req_file = src_path / "requirements.txt"
49
  if req_file.exists():
50
  analysis["has_requirements"] = True
51
  try:
52
  reqs = req_file.read_text(encoding="utf-8").strip().split("\n")
 
53
  cleaned_deps = []
54
  for r in reqs:
55
  r = r.strip()
56
  if r and not r.startswith("#"):
 
57
  if "opencv-python==4.10.0" in r:
58
  r = "opencv-python>=4.10.0.82"
59
  elif "opencv-python==4.10" in r:
60
  r = "opencv-python>=4.10.0.82"
61
 
 
62
  if "==" in r and not r.startswith("git+"):
63
  pkg_name = r.split("==")[0]
 
64
  if pkg_name.lower() in ["torch", "tensorflow", "transformers", "numpy"]:
65
  cleaned_deps.append(r)
66
  else:
67
  version = r.split("==")[1]
 
68
  if version.count('.') == 1:
69
  version = version + ".0"
70
  cleaned_deps.append(f"{pkg_name}>={version}")
 
74
  except:
75
  analysis["dependencies"] = []
76
 
 
77
  for readme_name in ["README.md", "readme.md", "README.rst", "README.txt"]:
78
  readme_file = src_path / readme_name
79
  if readme_file.exists():
80
  analysis["has_readme"] = True
81
  try:
82
  readme_content = readme_file.read_text(encoding="utf-8")
83
+ analysis["readme_content"] = readme_content[:5000]
 
 
84
  lines = readme_content.split("\n")
85
  for i, line in enumerate(lines[:10]):
86
  if line.strip() and not line.startswith("#") and not line.startswith("!"):
87
  analysis["description"] = line.strip()
88
  break
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  except:
90
  pass
91
 
 
92
  py_files = list(src_path.glob("**/*.py"))
93
+ for py_file in py_files[:20]:
94
  if "__pycache__" not in str(py_file) and ".git" not in str(py_file):
95
  relative_path = py_file.relative_to(src_path)
96
 
 
97
  if any(name in py_file.name for name in ["main.py", "app.py", "demo.py", "run.py", "server.py", "streamlit_app.py"]):
98
  analysis["entry_points"].append(str(relative_path))
99
 
 
100
  try:
101
  content = py_file.read_text(encoding="utf-8")[:1000]
102
  if "if __name__" in content and "main" in content:
103
  analysis["entry_points"].append(str(relative_path))
104
 
 
105
  if any(lib in content for lib in ["torch", "tensorflow", "transformers", "numpy", "pandas", "cv2", "PIL"]):
106
  analysis["key_files"].append({
107
  "path": str(relative_path),
 
110
  except:
111
  pass
112
 
 
113
  model_extensions = [".pth", ".pt", ".ckpt", ".h5", ".pb", ".onnx", ".safetensors"]
114
  for ext in model_extensions:
115
  model_files = list(src_path.glob(f"**/*{ext}"))
 
117
  if ".git" not in str(mf):
118
  analysis["model_files"].append(str(mf.relative_to(src_path)))
119
 
 
120
  config_patterns = ["config.json", "config.yaml", "config.yml", "*.json", "*.yaml"]
121
  for pattern in config_patterns:
122
  config_files = list(src_path.glob(pattern))
 
126
 
127
  return analysis
128
 
129
+ def generate_gradio_app(repo_url: str, analysis: Dict) -> Dict:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130
  context = f"""Repository URL: {repo_url}
131
 
132
  Repository Analysis:
 
145
  if analysis.get('readme_content'):
146
  context += f"\n--- README.md (excerpt) ---\n{analysis['readme_content'][:2000]}\n"
147
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
148
  system_prompt = """You are an expert at creating Gradio apps from GitHub repositories.
149
  Your task is to generate a complete, working Gradio interface that demonstrates the main functionality of the repository.
150
 
 
154
  3. Handle errors gracefully with clear user feedback
155
  4. Include API key inputs when external services are required
156
  5. Create intuitive UI components for the main features
157
+ 6. Always use gradio>=5.35.0
 
 
 
 
 
 
158
 
159
  Return ONLY valid JSON with these exact keys:
160
  - app_py: Complete Gradio app code
161
  - requirements_txt: All necessary dependencies including gradio>=5.35.0
162
  - summary: Brief description of what the app does"""
163
 
164
+ # Fireworks AI API ์‹œ๋„
165
+ fireworks_key = os.getenv("FIREWORKS_API_KEY")
166
+ if fireworks_key:
167
  try:
168
+ url = "https://api.fireworks.ai/inference/v1/chat/completions"
 
 
 
 
169
  payload = {
170
+ "model": "accounts/fireworks/models/qwen3-coder-480b-a35b-instruct",
171
+ "max_tokens": 4096,
172
+ "top_p": 1,
173
+ "top_k": 40,
174
+ "presence_penalty": 0,
175
+ "frequency_penalty": 0,
176
+ "temperature": 0.6,
177
  "messages": [
178
  {"role": "system", "content": system_prompt},
179
  {"role": "user", "content": f"Create a fully functional Gradio app for this repository:\n\n{context[:8000]}"}
180
+ ]
181
+ }
182
+ headers = {
183
+ "Accept": "application/json",
184
+ "Content-Type": "application/json",
185
+ "Authorization": f"Bearer {fireworks_key.strip()}"
186
  }
187
 
188
+ r = requests.post(url, headers=headers, data=json.dumps(payload), timeout=30)
 
 
 
 
 
189
 
190
  if r.status_code == 200:
191
  response_text = r.json()["choices"][0]["message"]["content"]
192
+ print("โœ… Fireworks AI๋กœ ์•ฑ ์ƒ์„ฑ ์„ฑ๊ณต")
193
 
 
194
  try:
195
  if "```json" in response_text:
196
  start = response_text.find("```json") + 7
 
215
  print(f"โš ๏ธ JSON ํŒŒ์‹ฑ ์˜ค๋ฅ˜: {e}")
216
  return None
217
  except Exception as e:
218
+ print(f"โš ๏ธ Fireworks AI API ์˜ค๋ฅ˜: {e}")
219
 
220
+ print("โ„น๏ธ AI API๊ฐ€ ์—†์–ด ๊ธฐ๋ณธ ํ…œํ”Œ๋ฆฟ์„ ์ƒ์„ฑํ•ฉ๋‹ˆ๋‹ค.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  return create_smart_template(repo_url, analysis)
222
 
223
  def create_smart_template(repo_url: str, analysis: Dict) -> Dict:
 
 
224
  repo_name = Path(repo_url.rstrip("/")).name
225
  description = analysis.get("description", "A project deployed from GitHub") if analysis else "A project deployed from GitHub"
226
 
 
227
  deps = " ".join(analysis.get("dependencies", [])) if analysis else ""
 
228
  has_cv = any(lib in deps for lib in ["cv2", "PIL", "pillow", "opencv"])
229
  has_nlp = any(lib in deps for lib in ["transformers", "nltk", "spacy"])
 
230
  has_3d = any(lib in deps for lib in ["gaussian", "rasterizer", "plyfile", "trimesh"])
231
 
 
232
  requirements = ["gradio>=5.35.0"]
233
  if analysis and analysis.get("dependencies"):
 
234
  filtered_deps = []
235
  for dep in analysis["dependencies"][:15]:
236
  if not dep.startswith("git+") and not dep.startswith("-e") and not dep.startswith("file:"):
 
237
  if "==" in dep and dep.split("==")[0].lower() not in ["torch", "tensorflow", "numpy"]:
238
  pkg_name = dep.split("==")[0]
239
  version = dep.split("==")[1]
 
242
  filtered_deps.append(dep)
243
  requirements.extend(filtered_deps)
244
 
 
245
  if has_3d or "gaussian" in repo_name.lower():
 
246
  app_code = f'''import gradio as gr
247
  import os
 
 
 
 
 
 
 
248
 
249
  def process_3d(input_file):
 
250
  if input_file is None:
251
  return "Please upload a 3D file or image"
252
 
 
256
  This project requires:
257
  1. CUDA-enabled GPU
258
  2. Custom C++/CUDA extensions compilation
 
 
 
 
 
 
 
 
259
 
260
  Original repository: {repo_url}
261
  """
 
262
  return info
263
 
 
264
  with gr.Blocks(title="{repo_name}") as demo:
265
  gr.Markdown(f"""
266
  # {repo_name.replace("-", " ").title()}
 
268
  {description}
269
 
270
  This space was created from: [{repo_url}]({repo_url})
 
 
271
  """)
272
 
273
  with gr.Row():
 
292
  from PIL import Image
293
  import numpy as np
294
 
 
 
 
295
  def process_image(image):
 
296
  if image is None:
297
  return None, "Please upload an image"
298
 
 
 
 
 
299
  img_array = np.array(image)
300
  processed = Image.fromarray(img_array)
301
 
302
+ info = f"Image shape: {{img_array.shape}}"
303
  return processed, info
304
 
 
305
  with gr.Blocks(title="{repo_name}") as demo:
306
  gr.Markdown(f"""
307
  # {repo_name.replace("-", " ").title()}
 
333
  elif has_nlp:
334
  app_code = f'''import gradio as gr
335
 
 
 
 
336
  def process_text(text, max_length=100):
 
337
  if not text:
338
  return "Please enter some text"
339
 
 
 
 
340
  word_count = len(text.split())
341
  char_count = len(text)
342
 
343
  result = f"""
344
  **Analysis Results:**
345
+ - Word count: {{word_count}}
346
+ - Character count: {{char_count}}
347
+ - Average word length: {{char_count/max(word_count, 1):.1f}}
348
  """
349
 
350
  return result
351
 
 
352
  with gr.Blocks(title="{repo_name}") as demo:
353
  gr.Markdown(f"""
354
  # {repo_name.replace("-", " ").title()}
 
389
  else:
390
  app_code = f'''import gradio as gr
391
 
 
 
 
392
  def main_function(input_data):
 
393
  if not input_data:
394
  return "Please provide input"
395
 
396
+ result = f"Processed successfully! Input received: {{input_data}}"
 
 
397
  return result
398
 
 
399
  with gr.Blocks(title="{repo_name}") as demo:
400
  gr.Markdown(f"""
401
  # {repo_name.replace("-", " ").title()}
 
433
  "summary": f"Smart template created for {repo_name}"
434
  }
435
 
 
436
  def clone(repo_git, repo_hf, sdk_type, skip_lfs, enable_smart_generation):
 
437
  folder = str(uuid.uuid4())
438
 
 
439
  hf_token = os.getenv("HF_TOKEN")
440
  if not hf_token:
441
+ yield "โŒ Error: HF_TOKEN not found in environment variables."
442
  return
443
 
444
  try:
 
445
  yield "๐Ÿ”„ Starting clone process..."
446
 
 
447
  api = HfApi(token=hf_token)
448
  try:
449
  user_info = api.whoami()
 
453
  yield f"โŒ Authentication failed: {str(e)}"
454
  return
455
 
 
456
  yield f"๐Ÿ“ฅ Cloning repository from {repo_git}..."
457
 
458
  env = os.environ.copy()
 
 
459
  env['GIT_LFS_SKIP_SMUDGE'] = '1'
460
  clone_cmd = ['git', 'clone', '--recurse-submodules', repo_git, folder]
461
  subprocess.run(clone_cmd, check=True, env=env)
462
 
463
  if not skip_lfs:
 
464
  yield "๐Ÿ“ฆ Attempting to download LFS files..."
465
  try:
466
  subprocess.run(['git', 'lfs', 'install'], cwd=folder, check=True)
467
  lfs_result = subprocess.run(['git', 'lfs', 'pull'], cwd=folder, capture_output=True, text=True)
468
 
469
  if lfs_result.returncode != 0:
470
+ yield f"โš ๏ธ Warning: LFS download failed"
471
+ skip_lfs = True
 
472
  else:
473
  yield "โœ… LFS files downloaded successfully"
474
  except Exception as e:
475
  yield f"โš ๏ธ LFS error: {str(e)}"
476
+ skip_lfs = True
 
477
 
 
478
  if skip_lfs:
479
  yield "๐Ÿงน Removing LFS pointer files..."
480
  removed_files = remove_lfs_files(folder)
481
  if removed_files:
482
  yield f"๐Ÿ“ Removed {len(removed_files)} LFS pointer files"
 
 
 
 
 
483
 
 
484
  if enable_smart_generation:
485
  yield "๐Ÿ” Analyzing repository structure..."
486
  folder_path = Path(folder)
487
  analysis = analyze_repository(folder_path)
488
 
 
 
 
489
  yield "๐Ÿค– Generating smart Gradio app..."
490
+ generated = generate_gradio_app(repo_git, analysis)
491
 
492
  if generated and isinstance(generated, dict) and "app_py" in generated:
 
493
  app_path = folder_path / "app.py"
494
  app_path.write_text(generated["app_py"], encoding="utf-8")
495
  yield "โœ… Smart app.py generated"
496
 
 
497
  req_path = folder_path / "requirements.txt"
498
  existing_reqs = []
499
  if req_path.exists():
 
504
 
505
  new_reqs = generated["requirements_txt"].strip().split("\n") if generated["requirements_txt"] else []
506
 
 
507
  all_reqs = set()
508
  git_reqs = []
509
  torch_reqs = []
 
514
  if not req or req.startswith("#"):
515
  continue
516
 
 
517
  if req.startswith("git+"):
518
  git_reqs.append(req)
 
519
  elif "torch" in req.lower() or "cuda" in req.lower():
520
  torch_reqs.append(req)
521
  else:
522
  regular_reqs.append(req)
523
 
 
524
  has_gradio = any("gradio" in req for req in regular_reqs)
525
  if not has_gradio:
526
  regular_reqs.append("gradio>=5.35.0")
527
 
 
528
  final_reqs = []
529
 
 
530
  if torch_reqs:
531
  final_reqs.extend(sorted(set(torch_reqs)))
532
+ final_reqs.append("")
533
 
 
534
  final_reqs.extend(sorted(set(regular_reqs)))
535
 
 
536
  if git_reqs:
537
+ final_reqs.append("")
 
538
  final_reqs.extend(sorted(set(git_reqs)))
539
 
540
  req_content = "\n".join(final_reqs)
541
  req_path.write_text(req_content, encoding="utf-8")
542
+ yield "โœ… Requirements.txt updated"
543
 
 
544
  readme_path = folder_path / "README.md"
545
  readme_content = f"""---
546
  title: {repo_hf.replace("-", " ").title()}
 
558
  {analysis.get('description', 'Deployed from GitHub repository')}
559
 
560
  Deployed from: {repo_git}
 
 
 
 
 
 
561
  """
562
  readme_path.write_text(readme_content, encoding="utf-8")
563
  yield "โœ… README.md created/updated"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
564
 
 
565
  git_dir = os.path.join(folder, '.git')
566
  if os.path.exists(git_dir):
567
  shutil.rmtree(git_dir)
568
  yield "๐Ÿงน Removed .git directory"
569
 
 
570
  gitattributes_path = os.path.join(folder, '.gitattributes')
571
  if os.path.exists(gitattributes_path):
 
572
  with open(gitattributes_path, 'r') as f:
573
  lines = f.readlines()
574
 
 
581
  with open(gitattributes_path, 'w') as f:
582
  f.writelines(new_lines)
583
  else:
 
584
  os.remove(gitattributes_path)
585
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
586
  yield "๐Ÿ—๏ธ Creating Hugging Face Space..."
587
 
588
  repo_id = f"{username}/{slugify(repo_hf)}"
589
  space_created = False
590
 
 
591
  for attempt in range(3):
592
  try:
593
  yield f" Creating Space: {repo_id} (attempt {attempt + 1}/3)"
594
 
 
595
  try:
596
  existing_space = api.space_info(repo_id=repo_id, token=hf_token)
597
  yield f" โ„น๏ธ Space already exists: {existing_space.id}"
598
  space_created = True
599
  break
600
  except:
 
601
  pass
602
 
 
603
  create_result = api.create_repo(
604
  repo_id=repo_id,
605
  repo_type="space",
 
609
  token=hf_token
610
  )
611
 
 
 
612
  time.sleep(3)
613
 
 
614
  space_info = api.space_info(repo_id=repo_id, token=hf_token)
615
  yield f" โœ… Space created successfully: {space_info.id}"
616
  space_created = True
 
619
  except Exception as e:
620
  error_msg = str(e)
621
 
 
622
  if "429" in error_msg or "Too Many Requests" in error_msg:
623
+ yield f"โŒ Rate Limit Error - Try again in 17-24 hours"
624
+ raise Exception(f"Rate limit reached.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
625
 
626
  yield f" โš ๏ธ Attempt {attempt + 1} failed: {error_msg[:100]}..."
627
  if attempt < 2:
628
  yield " Retrying in 5 seconds..."
 
629
  time.sleep(5)
630
  else:
631
  yield f" โŒ Failed to create space after 3 attempts"
 
634
  if not space_created:
635
  raise Exception("Failed to create space")
636
 
 
637
  folder_size = sum(os.path.getsize(os.path.join(dirpath, filename))
638
  for dirpath, dirnames, filenames in os.walk(folder)
639
+ for filename in filenames) / (1024 * 1024)
640
 
641
  yield f"๐Ÿ“Š Folder size: {folder_size:.2f} MB"
642
 
 
643
  file_count = sum(len(files) for _, _, files in os.walk(folder))
644
  yield f"๐Ÿ“ Total files to upload: {file_count}"
645
 
 
646
  upload_success = False
647
  max_retries = 3
648
 
 
650
  try:
651
  if attempt > 0:
652
  yield f"๐Ÿ“ค Upload attempt {attempt + 1}/{max_retries}..."
653
+ time.sleep(5)
 
654
 
655
+ if folder_size > 500:
656
+ yield "๐Ÿ“ค Uploading large folder to Hugging Face..."
657
  api.upload_large_folder(
658
  folder_path=folder,
659
  repo_id=repo_id,
 
681
  error_msg = str(upload_error)
682
 
683
  if "404" in error_msg and attempt < max_retries - 1:
684
+ yield f" โš ๏ธ Upload failed (404). Retrying..."
 
 
685
  time.sleep(10)
686
 
 
687
  try:
688
  space_info = api.space_info(repo_id=repo_id, token=hf_token)
689
  yield f" โœ… Space confirmed to exist"
690
  except:
 
691
  yield " ๐Ÿ”„ Attempting to recreate space..."
692
  try:
693
  api.create_repo(
 
704
 
705
  elif "LFS pointer" in error_msg:
706
  yield "โŒ Upload failed due to remaining LFS pointer files"
 
 
 
 
 
 
 
 
 
 
 
 
 
707
  raise upload_error
708
 
709
  elif attempt == max_retries - 1:
710
+ yield f"โŒ Upload failed after {max_retries} attempts"
711
  raise upload_error
712
  else:
713
  yield f" โš ๏ธ Upload failed: {error_msg[:100]}..."
 
715
  if not upload_success:
716
  raise Exception("Upload failed after all retries")
717
 
 
718
  shutil.rmtree(folder)
719
 
720
  space_url = f"https://huggingface.co/spaces/{repo_id}"
721
 
 
722
  yield f"""
723
  โœ… **Successfully created Space!**
724
 
725
  ๐Ÿ”— **Your Space URL**: {space_url}
726
 
727
+ ๐Ÿ“‹ **Summary:**
728
+ - Space ID: `{repo_id}`
729
+ - Source: {repo_git}
730
+ - SDK: {sdk_type}
731
+ - Smart Generation: {'Enabled' if enable_smart_generation else 'Disabled'}
732
+ - LFS Files: {'Skipped' if skip_lfs else 'Included'}
 
 
 
 
 
 
 
 
 
 
 
733
  """
734
 
735
  if skip_lfs:
736
+ yield "\nโš ๏ธ LFS files were removed."
737
 
738
  if enable_smart_generation:
739
+ yield "\n๐Ÿค– AI-generated Gradio interface was created"
 
 
 
 
740
 
741
  except subprocess.CalledProcessError as e:
742
  if os.path.exists(folder):
 
747
  shutil.rmtree(folder)
748
  yield f"โŒ Error: {str(e)}"
749
 
 
750
  css = """
751
  .container {
752
  max-width: 900px;
 
761
  font-size: 14px;
762
  line-height: 1.5;
763
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
764
  """
765
 
766
  with gr.Blocks(css=css) as demo:
767
+ gr.Markdown("# ๐Ÿš€ GitHub to Hugging Face Space Cloner")
 
 
768
 
 
 
 
 
 
 
 
 
 
769
  if not os.getenv("HF_TOKEN"):
770
+ gr.Markdown("โŒ HF_TOKEN Required - Set it in Space settings")
 
 
 
 
 
 
 
 
 
 
 
771
  else:
772
+ gr.Markdown("โœ… HF_TOKEN Found")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
773
 
774
  with gr.Row():
775
  with gr.Column():
776
  repo_git = gr.Textbox(
777
  label="GitHub Repository URL",
778
+ placeholder="https://github.com/username/repository"
 
779
  )
780
  repo_hf = gr.Textbox(
781
  label="Hugging Face Space Name",
782
+ placeholder="my-awesome-space"
 
783
  )
784
  sdk_choices = gr.Radio(
785
  ["gradio", "streamlit", "docker", "static"],
786
  label="Space SDK",
787
+ value="gradio"
 
788
  )
789
  skip_lfs = gr.Checkbox(
790
  label="Skip Git LFS files",
791
+ value=True
 
792
  )
793
  enable_smart_generation = gr.Checkbox(
794
+ label="๐Ÿค– Enable Smart app.py Generation",
795
  value=False,
796
+ info="Requires FIREWORKS_API_KEY in environment variables"
797
  )
798
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
799
  btn = gr.Button("๐ŸŽฏ Clone Repository", variant="primary")
800
 
801
  with gr.Column():
 
807
  show_copy_button=True
808
  )
809
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
810
  btn.click(
811
  fn=clone,
812
  inputs=[repo_git, repo_hf, sdk_choices, skip_lfs, enable_smart_generation],
813
  outputs=output
814
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
815
 
816
  if __name__ == "__main__":
817
  demo.launch()