Nymbo committed
Commit d369d82 · verified · 1 Parent(s): 5f048ad

Refactoring the server to be a lot more modular, easier to ingest

Modules/Deep_Research.py ADDED
@@ -0,0 +1,448 @@
+ from __future__ import annotations
+
+ import os
+ import re
+ import tempfile
+ import time
+ from collections import deque
+ from concurrent.futures import Future, ThreadPoolExecutor, as_completed
+ from typing import Annotated, Dict, List, Tuple
+ from urllib.parse import urlparse
+
+ import gradio as gr
+ import requests
+ from bs4 import BeautifulSoup
+ from ddgs import DDGS
+ from huggingface_hub import InferenceClient
+
+ from .Web_Fetch import _fullpage_markdown_from_soup, _http_get_enhanced
+ from app import _log_call_end, _log_call_start, _search_rate_limiter, _truncate_for_log
+
+ HF_TEXTGEN_TOKEN = os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")
+
+
+ class SlowHost(Exception):
+     pass
+
+
+ def _normalize_query(q: str) -> str:
+     if not q:
+         return ""
+     repl = {"“": '"', "”": '"', "‘": "'", "’": "'", "`": "'"}
+     for key, value in repl.items():
+         q = q.replace(key, value)
+     q = re.sub(r"\s+", " ", q)
+     q = re.sub(r'"\s+"', " ", q)
+     q = q.strip().strip('"').strip()
+     return q
+
+
+ def _search_urls_only(query: str, max_results: int) -> list[str]:
+     if not query or not query.strip() or max_results <= 0:
+         return []
+     urls: list[str] = []
+     try:
+         _search_rate_limiter.acquire()
+         with DDGS() as ddgs:
+             for item in ddgs.text(query, region="wt-wt", safesearch="moderate", max_results=max_results):
+                 url = (item.get("href") or item.get("url") or "").strip()
+                 if url:
+                     urls.append(url)
+     except Exception:
+         pass
+     seen = set()
+     deduped = []
+     for url in urls:
+         if url not in seen:
+             seen.add(url)
+             deduped.append(url)
+     return deduped
+
+
+ def _fetch_page_markdown_fast(url: str, max_chars: int = 3000, timeout: float = 10.0) -> str:
+     try:
+         resp = _http_get_enhanced(url, timeout=timeout, skip_rate_limit=True)
+         resp.raise_for_status()
+     except requests.exceptions.RequestException as exc:
+         msg = str(exc)
+         if "timed out" in msg.lower():
+             raise SlowHost(msg) from exc
+         return ""
+     final_url = str(resp.url)
+     ctype = resp.headers.get("Content-Type", "")
+     if "html" not in ctype.lower():
+         return ""
+     resp.encoding = resp.encoding or resp.apparent_encoding
+     html = resp.text
+     soup = BeautifulSoup(html, "lxml")
+     md_text = _fullpage_markdown_from_soup(soup, final_url, "")
+     if max_chars > 0 and len(md_text) > max_chars:
+         md_text = md_text[:max_chars]
+     return md_text
+
+
+ def _truncate_join(parts: List[str], max_chars: int) -> Tuple[str, bool]:
+     out = []
+     total = 0
+     truncated = False
+     for part in parts:
+         if not part:
+             continue
+         if total + len(part) > max_chars:
+             out.append(part[: max(0, max_chars - total)])
+             truncated = True
+             break
+         out.append(part)
+         total += len(part)
+     return ("\n\n".join(out), truncated)
+
+
+ def _build_research_prompt(summary: str, queries: List[str], url_list: List[str], pages_map: Dict[str, str]) -> str:
+     researcher_instructions = (
+         "You are Nymbot, a helpful deep research assistant. You will be asked a Query from a user and you will create a long, comprehensive, well-structured research report in response to the user's Query.\n\n"
+         "You have been provided with User Question, Search Queries, and numerous webpages that the searches yielded.\n\n"
+         "<report_format>\n"
+         "Write a well-formatted report in the structure of a scientific report to a broad audience. The report must be readable and have a nice flow of Markdown headers and paragraphs of text. Do NOT use bullet points or lists which break up the natural flow. The report must be exhaustive for comprehensive topics.\n"
+         "For any given user query, first determine the major themes or areas that need investigation, then structure these as main sections, and develop detailed subsections that explore various facets of each theme. Each section and subsection requires paragraphs of texts that need to all connect into one narrative flow.\n"
+         "</report_format>\n\n"
+         "<document_structure>\n"
+         "- Always begin with a clear title using a single # header\n"
+         "- Organize content into major sections using ## headers\n"
+         "- Further divide into subsections using ### headers\n"
+         "- Use #### headers sparingly for special subsections\n"
+         "- Never skip header levels\n"
+         "- Write multiple paragraphs per section or subsection\n"
+         "- Each paragraph must contain at least 4-5 sentences, present novel insights and analysis grounded in source material, connect ideas to original query, and build upon previous paragraphs to create a narrative flow\n"
+         "- Never use lists, instead always use text or tables\n\n"
+         "Mandatory Section Flow:\n"
+         "1. Title (# level)\n - Before writing the main report, start with one detailed paragraph summarizing key findings\n"
+         "2. Main Body Sections (## level)\n - Each major topic gets its own section (## level). There MUST BE at least 5 sections.\n - Use ### subsections for detailed analysis\n - Every section or subsection needs at least one paragraph of narrative before moving to the next section\n - Do NOT have a section titled \"Main Body Sections\" and instead pick informative section names that convey the theme of the section\n"
+         "3. Conclusion (## level)\n - Synthesis of findings\n - Potential recommendations or next steps\n"
+         "</document_structure>\n\n"
+         "<planning_rules>\n"
+         "- Always break it down into multiple steps\n"
+         "- Assess the different sources and whether they are useful for any steps needed to answer the query\n"
+         "- Create the best report that weighs all the evidence from the sources\n"
+         "- Remember that the current date is: Wednesday, April 23, 2025, 11:50 AM EDT\n"
+         "- Make sure that your final report addresses all parts of the query\n"
+         "- Communicate a brief high-level plan in the introduction; do not reveal chain-of-thought.\n"
+         "- When referencing sources during analysis, you should still refer to them by index with brackets and follow <citations>\n"
+         "- As a final step, review your planned report structure and ensure it completely answers the query.\n"
+         "</planning_rules>\n\n"
+     )
+     sources_blocks: List[str] = []
+     indexed_urls: List[str] = []
+     for idx, url in enumerate(url_list, start=1):
+         text = pages_map.get(url, "").strip()
+         if not text:
+             continue
+         indexed_urls.append(f"[{idx}] {url}")
+         sources_blocks.append(f"[Source {idx}] URL: {url}\n\n{text}")
+     sources_joined, truncated = _truncate_join(sources_blocks, max_chars=100_000)
+     prompt_parts = [researcher_instructions]
+     prompt_parts.append("<user_query_summary>\n" + (summary or "") + "\n</user_query_summary>\n")
+     populated = [q for q in queries if q and q.strip()]
+     if populated:
+         prompt_parts.append("<search_queries>\n" + "\n".join(f"- {q.strip()}" for q in populated) + "\n</search_queries>\n")
+     if indexed_urls:
+         prompt_parts.append("<sources_list>\n" + "\n".join(indexed_urls) + "\n</sources_list>\n")
+     prompt_parts.append("<fetched_documents>\n" + sources_joined + ("\n\n[NOTE] Sources truncated due to context limits." if truncated else "") + "\n</fetched_documents>")
+     return "\n\n".join(prompt_parts)
+
+
+ def _write_report_tmp(text: str) -> str:
+     tmp_dir = tempfile.mkdtemp(prefix="deep_research_")
+     path = os.path.join(tmp_dir, "research_report.txt")
+     with open(path, "w", encoding="utf-8") as file:
+         file.write(text)
+     return path
+
+
+ def Deep_Research(
+     summary: Annotated[str, "Summarization of research topic (one or more sentences)."],
+     query1: Annotated[str, "DDG Search Query 1"],
+     max1: Annotated[int, "Max results for Query 1 (1-50)"] = 10,
+     query2: Annotated[str, "DDG Search Query 2"] = "",
+     max2: Annotated[int, "Max results for Query 2 (1-50)"] = 10,
+     query3: Annotated[str, "DDG Search Query 3"] = "",
+     max3: Annotated[int, "Max results for Query 3 (1-50)"] = 10,
+     query4: Annotated[str, "DDG Search Query 4"] = "",
+     max4: Annotated[int, "Max results for Query 4 (1-50)"] = 10,
+     query5: Annotated[str, "DDG Search Query 5"] = "",
+     max5: Annotated[int, "Max results for Query 5 (1-50)"] = 10,
+ ) -> tuple[str, str, str]:
+     _log_call_start(
+         "Deep_Research",
+         summary=_truncate_for_log(summary or "", 200),
+         queries=[q for q in [query1, query2, query3, query4, query5] if q],
+     )
+     if not HF_TEXTGEN_TOKEN:
+         _log_call_end("Deep_Research", "error=missing HF token")
+         raise gr.Error("Please provide a `HF_READ_TOKEN` to enable Deep Research.")
+     queries = [
+         _normalize_query(query1 or ""),
+         _normalize_query(query2 or ""),
+         _normalize_query(query3 or ""),
+         _normalize_query(query4 or ""),
+         _normalize_query(query5 or ""),
+     ]
+     reqs = [
+         max(1, min(50, int(max1))),
+         max(1, min(50, int(max2))),
+         max(1, min(50, int(max3))),
+         max(1, min(50, int(max4))),
+         max(1, min(50, int(max5))),
+     ]
+     total_requested = sum(reqs)
+     if total_requested > 50:
+         reqs = [10, 10, 10, 10, 10]
+     start_ts = time.time()
+     budget_seconds = 55.0
+     deadline = start_ts + budget_seconds
+
+     def time_left() -> float:
+         return max(0.0, deadline - time.time())
+
+     all_urls: list[str] = []
+     tasks = []
+     with ThreadPoolExecutor(max_workers=min(5, sum(1 for q in queries if q.strip())) or 1) as executor:
+         for query, count in zip(queries, reqs):
+             if not query.strip():
+                 continue
+             tasks.append(executor.submit(_search_urls_only, query.strip(), count))
+         for future in as_completed(tasks):
+             try:
+                 urls = future.result() or []
+             except Exception:
+                 urls = []
+             for url in urls:
+                 if url not in all_urls:
+                     all_urls.append(url)
+                 if len(all_urls) >= 50:
+                     break
+             if time_left() <= 0.5:
+                 break
+     if len(all_urls) > 50:
+         all_urls = all_urls[:50]
+     blacklist = {
+         "homedepot.com",
+         "tractorsupply.com",
+         "mcmaster.com",
+         "mrchain.com",
+         "answers.com",
+         "city-data.com",
+         "dictionary.cambridge.org",
+     }
+
+     def _domain(url: str) -> str:
+         try:
+             return urlparse(url).netloc.lower()
+         except Exception:
+             return ""
+
+     all_urls = [url for url in all_urls if _domain(url) not in blacklist]
+     skip_exts = (
+         ".pdf",
+         ".ppt",
+         ".pptx",
+         ".doc",
+         ".docx",
+         ".xls",
+         ".xlsx",
+         ".zip",
+         ".gz",
+         ".tgz",
+         ".bz2",
+         ".7z",
+         ".rar",
+     )
+
+     def _skip_url(url: str) -> bool:
+         try:
+             path = urlparse(url).path.lower()
+         except Exception:
+             return False
+         return any(path.endswith(ext) for ext in skip_exts)
+
+     all_urls = [url for url in all_urls if not _skip_url(url)]
+     pages: dict[str, str] = {}
+     if all_urls:
+         queue = deque(all_urls)
+         attempts: dict[str, int] = {url: 0 for url in all_urls}
+         max_attempts = 2
+         max_workers = min(12, max(4, len(all_urls)))
+         in_flight: dict[Future, str] = {}
+         delayed: list[tuple[float, str]] = []
+
+         # Keep up to max_workers fetches in flight; slow hosts re-enter later via `delayed`.
+         def schedule_next(executor: ThreadPoolExecutor) -> None:
+             while queue and len(in_flight) < max_workers:
+                 url = queue.popleft()
+                 if url in pages:
+                     continue
+                 if attempts[url] >= max_attempts:
+                     continue
+                 attempts[url] += 1
+                 tl = time_left()
+                 per_timeout = 10.0 if tl > 15 else (5.0 if tl > 8 else 2.0)
+                 future = executor.submit(_fetch_page_markdown_fast, url, 3000, per_timeout)
+                 in_flight[future] = url
+
+         with ThreadPoolExecutor(max_workers=max_workers) as executor:
+             schedule_next(executor)
+             while (in_flight or queue) and time_left() > 0.2:
+                 now = time.time()
+                 if delayed:
+                     ready = []
+                     not_ready = []
+                     for ready_time, url in delayed:
+                         (ready if ready_time <= now else not_ready).append((ready_time, url))
+                     delayed = not_ready
+                     for _, url in ready:
+                         queue.append(url)
+                     if ready:
+                         schedule_next(executor)
+                 done = [future for future in list(in_flight.keys()) if future.done()]
+                 if not done:
+                     if not queue and delayed:
+                         sleep_for = max(0.02, min(0.25, max(0.0, min(t for t, _ in delayed) - time.time())))
+                         time.sleep(sleep_for)
+                     else:
+                         time.sleep(0.05)
+                 else:
+                     for future in done:
+                         url = in_flight.pop(future)
+                         try:
+                             md = future.result()
+                             if md and not md.startswith("Unsupported content type") and not md.startswith("An error occurred"):
+                                 pages[url] = md
+                                 try:
+                                     print(f"[FETCH OK] {url} (chars={len(md)})", flush=True)
+                                 except Exception:
+                                     pass
+                         except SlowHost:
+                             if time_left() > 5.0:
+                                 delayed.append((time.time() + 3.0, url))
+                         except Exception:
+                             pass
+                     schedule_next(executor)
+     prompt = _build_research_prompt(summary=summary or "", queries=[q for q in queries if q.strip()], url_list=list(pages.keys()), pages_map=pages)
+     messages = [
+         {"role": "system", "content": "You are Nymbot, an expert deep research assistant."},
+         {"role": "user", "content": prompt},
+     ]
+     try:
+         prompt_chars = len(prompt)
+     except Exception:
+         prompt_chars = -1
+     print(f"[PIPELINE] Fetch complete: pages={len(pages)}, unique_urls={len(pages.keys())}, prompt_chars={prompt_chars}", flush=True)
+     print("[PIPELINE] Starting inference (provider=cerebras, model=Qwen/Qwen3-235B-A22B-Thinking-2507)", flush=True)
+
+     def _run_inference(provider: str, max_tokens: int, temp: float, top_p: float):
+         client = InferenceClient(provider=provider, api_key=HF_TEXTGEN_TOKEN)
+         return client.chat.completions.create(
+             model="Qwen/Qwen3-235B-A22B-Thinking-2507",
+             messages=messages,
+             max_tokens=max_tokens,
+             temperature=temp,
+             top_p=top_p,
+         )
+
+     try:
+         print("[LLM] Attempt 1: provider=cerebras, max_tokens=32768", flush=True)
+         completion = _run_inference("cerebras", max_tokens=32768, temp=0.3, top_p=0.95)
+     except Exception as exc1:
+         print(f"[LLM] Attempt 1 failed: {str(exc1)[:200]}", flush=True)
+         try:
+             prompt2 = _build_research_prompt(
+                 summary=summary or "",
+                 queries=[q for q in queries if q.strip()],
+                 url_list=list(pages.keys())[:30],
+                 pages_map={key: pages[key] for key in list(pages.keys())[:30]},
+             )
+             messages = [
+                 {"role": "system", "content": "You are Nymbot, an expert deep research assistant."},
+                 {"role": "user", "content": prompt2},
+             ]
+             print("[LLM] Attempt 2: provider=cerebras (trimmed), max_tokens=16384", flush=True)
+             completion = _run_inference("cerebras", max_tokens=16384, temp=0.7, top_p=0.95)
+         except Exception as exc2:
+             print(f"[LLM] Attempt 2 failed: {str(exc2)[:200]}", flush=True)
+             try:
+                 print("[LLM] Attempt 3: provider=auto, max_tokens=8192", flush=True)
+                 completion = _run_inference("auto", max_tokens=8192, temp=0.7, top_p=0.95)
+             except Exception as exc3:
+                 _log_call_end("Deep_Research", f"error={_truncate_for_log(str(exc3), 260)}")
+                 raise gr.Error(f"Researcher model call failed: {exc3}")
+     raw = completion.choices[0].message.content or ""
+     # Strip any <think>...</think> reasoning block emitted by the model before post-processing.
+     try:
+         no_think = re.sub(r"<think>[\s\S]*?</think>", "", raw, flags=re.IGNORECASE)
+         no_think = re.sub(r"</?think>", "", no_think, flags=re.IGNORECASE)
+     except Exception:
+         no_think = raw
+     try:
+         paragraphs = [p for p in re.split(r"\n\s*\n", no_think) if p.strip()]
+         keep: List[str] = []
+         removed = 0
+         planning_re = re.compile(r"\b(let me|now i(?:'ll| will)?|first,|i will now|i will|i'll|let's|now let me|i need to|now i'll|now i will)\b", re.IGNORECASE)
+         for paragraph in paragraphs:
+             if planning_re.search(paragraph):
+                 removed += 1
+                 continue
+             keep.append(paragraph)
+         report = "\n\n".join(keep).strip()
+         if not report:
+             report = no_think.strip()
+     except Exception:
+         report = no_think
+         removed = 0
+     report = re.sub(r"\n\s*\n\s*\n+", "\n\n", report)
+     try:
+         print(f"[POSTPROCESS] removed_planning_paragraphs={removed}, raw_chars={len(raw)}, final_chars={len(report)}", flush=True)
+     except Exception:
+         pass
+     links_text = "\n".join([f"[{i+1}] {url}" for i, url in enumerate(pages.keys())])
+     file_path = _write_report_tmp(report)
+     elapsed = time.time() - start_ts
+     print(f"[TIMING] Deep_Research elapsed: {elapsed:.2f}s", flush=True)
+     _log_call_end("Deep_Research", f"urls={len(pages)} file={os.path.basename(file_path)} duration={elapsed:.2f}s")
+     return report, links_text, file_path
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Deep_Research,
+         inputs=[
+             gr.Textbox(label="Summarization of research topic", lines=3, placeholder="Briefly summarize the research topic or user question"),
+             gr.Textbox(label="DDG Search Query 1", max_lines=1),
+             gr.Slider(1, 50, value=10, step=1, label="Max results (Q1)"),
+             gr.Textbox(label="DDG Search Query 2", value="", max_lines=1),
+             gr.Slider(1, 50, value=10, step=1, label="Max results (Q2)"),
+             gr.Textbox(label="DDG Search Query 3", value="", max_lines=1),
+             gr.Slider(1, 50, value=10, step=1, label="Max results (Q3)"),
+             gr.Textbox(label="DDG Search Query 4", value="", max_lines=1),
+             gr.Slider(1, 50, value=10, step=1, label="Max results (Q4)"),
+             gr.Textbox(label="DDG Search Query 5", value="", max_lines=1),
+             gr.Slider(1, 50, value=10, step=1, label="Max results (Q5)"),
+         ],
+         outputs=[
+             gr.Markdown(label="Research Report"),
+             gr.Textbox(label="Fetched Links", lines=8),
+             gr.File(label="Download Research Report", file_count="single"),
+         ],
+         title="Deep Research",
+         description=(
+             "<div style=\"text-align:center\">Perform multi-query web research: search with DuckDuckGo, fetch up to 50 pages in parallel, "
+             "and generate a comprehensive report using a large LLM via Hugging Face Inference Providers (Cerebras). Requires HF_READ_TOKEN.</div>"
+         ),
+         api_description=(
+             "Runs 1–5 DDG searches (URLs only), caps total results to 50 (when exceeding, each query returns 10). "
+             "Fetches all URLs (3000 chars each) and calls the Researcher to write a research report. "
+             "Returns the report (Markdown), the list of sources, and a downloadable text file path. "
+             "Provide the user with a one-paragraph summary of the research report and the txt file in this format `![research_report.txt](URL)`"
+         ),
+         flagging_mode="never",
+         show_api=bool(HF_TEXTGEN_TOKEN),
+     )
+
+
+ __all__ = ["Deep_Research", "build_interface"]
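
As a quick orientation to the new module shape, a minimal usage sketch (not part of the commit): it assumes this repo's layout, where app.py provides the logging and rate-limiting helpers that Modules/Deep_Research.py imports, plus an HF_READ_TOKEN in the environment and network access. The queries are illustrative.

from Modules.Deep_Research import Deep_Research, build_interface

# Direct call: returns (markdown_report, numbered_source_list, report_file_path).
report, links, report_path = Deep_Research(
    summary="State of open-weight text-to-video models",
    query1="open weight text-to-video models 2025",
    max1=10,
    query2="Wan2.2 T2V capabilities",
    max2=5,
)
print(links)

# Or mount the module's ready-made Gradio tab in a larger app.
build_interface().launch()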
Modules/Generate_Image.py ADDED
@@ -0,0 +1,129 @@
+ from __future__ import annotations
+
+ import os
+ import random
+ from typing import Annotated
+
+ import gradio as gr
+ from PIL import Image
+ from huggingface_hub import InferenceClient
+
+ from app import _log_call_end, _log_call_start, _truncate_for_log
+
+ HF_API_TOKEN = os.getenv("HF_READ_TOKEN")
+
+
+ def Generate_Image(
+     prompt: Annotated[str, "Text description of the image to generate."],
+     model_id: Annotated[str, "Hugging Face model id in the form 'creator/model-name' (e.g., black-forest-labs/FLUX.1-Krea-dev)."] = "black-forest-labs/FLUX.1-Krea-dev",
+     negative_prompt: Annotated[str, "What should NOT appear in the image."] = (
+         "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, "
+         "missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, "
+         "mutated, ugly, disgusting, blurry, amputation, misspellings, typos"
+     ),
+     steps: Annotated[int, "Number of denoising steps (1–100). Higher = slower, potentially higher quality."] = 35,
+     cfg_scale: Annotated[float, "Classifier-free guidance scale (1–20). Higher = follow the prompt more closely."] = 7.0,
+     sampler: Annotated[str, "Sampling method label (UI only). Common options: 'DPM++ 2M Karras', 'DPM++ SDE Karras', 'Euler', 'Euler a', 'Heun', 'DDIM'."] = "DPM++ 2M Karras",
+     seed: Annotated[int, "Random seed for reproducibility. Use -1 for a random seed per call."] = -1,
+     width: Annotated[int, "Output width in pixels (64–1216, multiple of 32 recommended)."] = 1024,
+     height: Annotated[int, "Output height in pixels (64–1216, multiple of 32 recommended)."] = 1024,
+ ) -> Image.Image:
+     _log_call_start(
+         "Generate_Image",
+         prompt=_truncate_for_log(prompt, 200),
+         model_id=model_id,
+         steps=steps,
+         cfg_scale=cfg_scale,
+         seed=seed,
+         size=f"{width}x{height}",
+     )
+     if not prompt or not prompt.strip():
+         _log_call_end("Generate_Image", "error=empty prompt")
+         raise gr.Error("Please provide a non-empty prompt.")
+     enhanced_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     providers = ["auto", "replicate", "fal-ai"]
+     last_error: Exception | None = None
+     for provider in providers:
+         try:
+             client = InferenceClient(api_key=HF_API_TOKEN, provider=provider)
+             image = client.text_to_image(
+                 prompt=enhanced_prompt,
+                 negative_prompt=negative_prompt,
+                 model=model_id,
+                 width=width,
+                 height=height,
+                 num_inference_steps=steps,
+                 guidance_scale=cfg_scale,
+                 seed=seed if seed != -1 else random.randint(1, 1_000_000_000),
+             )
+             _log_call_end("Generate_Image", f"provider={provider} size={image.size}")
+             return image
+         except Exception as exc:  # pylint: disable=broad-except
+             last_error = exc
+             continue
+     msg = str(last_error) if last_error else "Unknown error"
+     lowered = msg.lower()
+     if "404" in msg:
+         raise gr.Error(f"Model not found or unavailable: {model_id}. Check the id and your HF token access.")
+     if "503" in msg:
+         raise gr.Error("The model is warming up. Please try again shortly.")
+     if "401" in msg or "403" in msg:
+         raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
+     if ("api_key" in lowered) or ("hf auth login" in lowered) or ("unauthorized" in lowered) or ("forbidden" in lowered):
+         raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
+     _log_call_end("Generate_Image", f"error={_truncate_for_log(msg, 200)}")
+     raise gr.Error(f"Image generation failed: {msg}")
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Generate_Image,
+         inputs=[
+             gr.Textbox(label="Prompt", placeholder="Enter a prompt", lines=2),
+             gr.Textbox(
+                 label="Model",
+                 value="black-forest-labs/FLUX.1-Krea-dev",
+                 placeholder="creator/model-name",
+                 max_lines=1,
+                 info="<a href=\"https://huggingface.co/models?pipeline_tag=text-to-image&inference_provider=nebius,cerebras,novita,fireworks-ai,together,fal-ai,groq,featherless-ai,nscale,hyperbolic,sambanova,cohere,replicate,scaleway,publicai,hf-inference&sort=trending\" target=\"_blank\" rel=\"noopener noreferrer\">Browse models</a>",
+             ),
+             gr.Textbox(
+                 label="Negative Prompt",
+                 value=(
+                     "(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, "
+                     "missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, "
+                     "mutated, ugly, disgusting, blurry, amputation, misspellings, typos"
+                 ),
+                 lines=2,
+             ),
+             gr.Slider(minimum=1, maximum=100, value=35, step=1, label="Steps"),
+             gr.Slider(minimum=1.0, maximum=20.0, value=7.0, step=0.1, label="CFG Scale"),
+             gr.Radio(
+                 label="Sampler",
+                 value="DPM++ 2M Karras",
+                 choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"],
+             ),
+             gr.Slider(minimum=-1, maximum=1_000_000_000, value=-1, step=1, label="Seed (-1 = random)"),
+             gr.Slider(minimum=64, maximum=1216, value=1024, step=32, label="Width"),
+             gr.Slider(minimum=64, maximum=1216, value=1024, step=32, label="Height"),
+         ],
+         outputs=gr.Image(label="Generated Image"),
+         title="Generate Image",
+         description=(
+             "<div style=\"text-align:center\">Generate images via Hugging Face serverless inference. "
+             "Default model is FLUX.1-Krea-dev.</div>"
+         ),
+         api_description=(
+             "Generate a single image from a text prompt using a Hugging Face model via serverless inference. "
+             "Supports creative prompts like 'a serene mountain landscape at sunset', 'portrait of a wise owl', "
+             "'futuristic city with flying cars'. Default model: FLUX.1-Krea-dev. "
+             "Parameters: prompt (str), model_id (str, creator/model-name), negative_prompt (str), steps (int, 1–100), "
+             "cfg_scale (float, 1–20), sampler (str), seed (int, -1=random), width/height (int, 64–1216). "
+             "Returns a PIL.Image. Return the generated media to the user in this format `![Alt text](URL)`"
+         ),
+         flagging_mode="never",
+         show_api=bool(os.getenv("HF_READ_TOKEN")),
+     )
+
+
+ __all__ = ["Generate_Image", "build_interface"]
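
A similar hedged sketch for the image module, assuming the same repo context and token; the prompt and output filename are made up for illustration:

from Modules.Generate_Image import Generate_Image

# Returns a PIL.Image; provider fallback (auto -> replicate -> fal-ai) happens internally.
image = Generate_Image(
    prompt="a serene mountain landscape at sunset",
    steps=35,
    cfg_scale=7.0,
    seed=42,  # fixed seed for reproducibility; -1 picks a random one
)
image.save("landscape.png")  # hypothetical output path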
Modules/Generate_Speech.py ADDED
@@ -0,0 +1,164 @@
+ from __future__ import annotations
+
+ import numpy as np
+ import gradio as gr
+
+ from typing import Annotated
+
+ from app import _log_call_end, _log_call_start, _truncate_for_log
+
+ try:
+     import torch  # type: ignore
+ except Exception:  # pragma: no cover
+     torch = None  # type: ignore
+
+ try:
+     from kokoro import KModel, KPipeline  # type: ignore
+ except Exception:  # pragma: no cover
+     KModel = None  # type: ignore
+     KPipeline = None  # type: ignore
+
+ _KOKORO_STATE = {
+     "initialized": False,
+     "device": "cpu",
+     "model": None,
+     "pipelines": {},
+ }
+
+
+ def get_kokoro_voices() -> list[str]:
+     try:
+         from huggingface_hub import list_repo_files
+
+         files = list_repo_files("hexgrad/Kokoro-82M")
+         voice_files = [file for file in files if file.endswith(".pt") and file.startswith("voices/")]
+         voices = [file.replace("voices/", "").replace(".pt", "") for file in voice_files]
+         return sorted(voices) if voices else _get_fallback_voices()
+     except Exception:
+         return _get_fallback_voices()
+
+
+ def _get_fallback_voices() -> list[str]:
+     return [
+         "af_alloy", "af_aoede", "af_bella", "af_heart", "af_jessica", "af_kore", "af_nicole", "af_nova", "af_river", "af_sarah", "af_sky",
+         "am_adam", "am_echo", "am_eric", "am_fenrir", "am_liam", "am_michael", "am_onyx", "am_puck", "am_santa",
+         "bf_alice", "bf_emma", "bf_isabella", "bf_lily",
+         "bm_daniel", "bm_fable", "bm_george", "bm_lewis",
+         "ef_dora", "em_alex", "em_santa",
+         "ff_siwis",
+         "hf_alpha", "hf_beta", "hm_omega", "hm_psi",
+         "if_sara", "im_nicola",
+         "jf_alpha", "jf_gongitsune", "jf_nezumi", "jf_tebukuro", "jm_kumo",
+         "pf_dora", "pm_alex", "pm_santa",
+         "zf_xiaobei", "zf_xiaoni", "zf_xiaoxiao", "zf_xiaoyi",
+         "zm_yunjian", "zm_yunxi", "zm_yunxia", "zm_yunyang",
+     ]
+
+
+ def _init_kokoro() -> None:
+     if _KOKORO_STATE["initialized"]:
+         return
+     if KModel is None or KPipeline is None:
+         raise RuntimeError("Kokoro is not installed. Please install the 'kokoro' package (>=0.9.4).")
+     device = "cpu"
+     if torch is not None:
+         try:
+             if torch.cuda.is_available():
+                 device = "cuda"
+         except Exception:
+             device = "cpu"
+     model = KModel().to(device).eval()
+     pipelines = {"a": KPipeline(lang_code="a", model=False)}
+     try:
+         pipelines["a"].g2p.lexicon.golds["kokoro"] = "kˈOkəɹO"
+     except Exception:
+         pass
+     _KOKORO_STATE.update({"initialized": True, "device": device, "model": model, "pipelines": pipelines})
+
+
+ def List_Kokoro_Voices() -> list[str]:
+     return get_kokoro_voices()
+
+
+ def Generate_Speech(
+     text: Annotated[str, "The text to synthesize (English)."],
+     speed: Annotated[float, "Speech speed multiplier in 0.5–2.0; 1.0 = normal speed."] = 1.25,
+     voice: Annotated[str, "Voice identifier from 54 available options."] = "af_heart",
+ ) -> tuple[int, np.ndarray]:
+     _log_call_start("Generate_Speech", text=_truncate_for_log(text, 200), speed=speed, voice=voice)
+     if not text or not text.strip():
+         _log_call_end("Generate_Speech", "error=empty text")
+         raise gr.Error("Please provide non-empty text to synthesize.")
+     _init_kokoro()
+     model = _KOKORO_STATE["model"]
+     pipelines = _KOKORO_STATE["pipelines"]
+     pipeline = pipelines.get("a")
+     if pipeline is None:
+         raise gr.Error("Kokoro English pipeline not initialized.")
+     audio_segments = []
+     pack = pipeline.load_voice(voice)
+     try:
+         segments = list(pipeline(text, voice, speed))
+         total_segments = len(segments)
+         for segment_idx, (text_chunk, ps, _) in enumerate(segments):
+             ref_s = pack[len(ps) - 1]
+             try:
+                 audio = model(ps, ref_s, float(speed))
+                 audio_segments.append(audio.detach().cpu().numpy())
+                 if total_segments > 10 and (segment_idx + 1) % 5 == 0:
+                     print(f"Progress: Generated {segment_idx + 1}/{total_segments} segments...")
+             except Exception as exc:
+                 raise gr.Error(f"Error generating audio for segment {segment_idx + 1}: {exc}")
+         if not audio_segments:
+             raise gr.Error("No audio was generated (empty synthesis result).")
+         if len(audio_segments) == 1:
+             final_audio = audio_segments[0]
+         else:
+             final_audio = np.concatenate(audio_segments, axis=0)
+             if total_segments > 1:
+                 duration = len(final_audio) / 24_000
+                 print(f"Completed: {total_segments} segments concatenated into {duration:.1f} seconds of audio")
+         _log_call_end("Generate_Speech", f"samples={final_audio.shape[0]} duration_sec={len(final_audio)/24_000:.2f}")
+         return 24_000, final_audio
+     except gr.Error as exc:
+         _log_call_end("Generate_Speech", f"gr_error={str(exc)}")
+         raise
+     except Exception as exc:  # pylint: disable=broad-except
+         _log_call_end("Generate_Speech", f"error={str(exc)[:120]}")
+         raise gr.Error(f"Error during speech generation: {exc}")
+
+
+ def build_interface() -> gr.Interface:
+     available_voices = get_kokoro_voices()
+     return gr.Interface(
+         fn=Generate_Speech,
+         inputs=[
+             gr.Textbox(label="Text", placeholder="Type text to synthesize…", lines=4),
+             gr.Slider(minimum=0.5, maximum=2.0, value=1.25, step=0.1, label="Speed"),
+             gr.Dropdown(
+                 label="Voice",
+                 choices=available_voices,
+                 value="af_heart",
+                 info="Select from 54 available voices across multiple languages and accents",
+             ),
+         ],
+         outputs=gr.Audio(label="Audio", type="numpy", format="wav", show_download_button=True),
+         title="Generate Speech",
+         description=(
+             "<div style=\"text-align:center\">Generate speech with Kokoro-82M. Supports multiple languages and accents. Runs on CPU or CUDA if available.</div>"
+         ),
+         api_description=(
+             "Synthesize speech from text using Kokoro-82M TTS model. Returns (sample_rate, waveform) suitable for playback. "
+             "Parameters: text (str), speed (float 0.5–2.0, default 1.25x), voice (str, default 'af_heart'). "
+             "Voice Legend: af=American female, am=American male, bf=British female, bm=British male, ef=European female, em=European male, hf=Hindi female, hm=Hindi male, if=Italian female, im=Italian male, jf=Japanese female, jm=Japanese male, pf=Portuguese female, pm=Portuguese male, zf=Chinese female, zm=Chinese male, ff=French female. "
+             "All Voices: af_alloy, af_aoede, af_bella, af_heart, af_jessica, af_kore, af_nicole, af_nova, af_river, af_sarah, af_sky, am_adam, am_echo, am_eric, am_fenrir, am_liam, am_michael, am_onyx, am_puck, am_santa, bf_alice, bf_emma, bf_isabella, bf_lily, bm_daniel, bm_fable, bm_george, bm_lewis, ef_dora, em_alex, em_santa, ff_siwis, hf_alpha, hf_beta, hm_omega, hm_psi, if_sara, im_nicola, jf_alpha, jf_gongitsune, jf_nezumi, jf_tebukuro, jm_kumo, pf_dora, pm_alex, pm_santa, zf_xiaobei, zf_xiaoni, zf_xiaoxiao, zf_xiaoyi, zm_yunjian, zm_yunxi, zm_yunxia, zm_yunyang. "
+             "Return the generated media to the user in this format `![Alt text](URL)`"
+         ),
+         flagging_mode="never",
+     )
+
+
+ __all__ = ["Generate_Speech", "List_Kokoro_Voices", "build_interface"]
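
For the speech module, a sketch of consuming the (sample_rate, waveform) return value; `soundfile` is an assumed extra dependency here (any WAV writer would do), and the output path is hypothetical:

import soundfile as sf  # assumed available; not imported by the module itself

from Modules.Generate_Speech import Generate_Speech

# Kokoro synthesizes at 24 kHz; the function returns (sample_rate, waveform).
sample_rate, waveform = Generate_Speech(
    text="Hello from the refactored speech module.",
    speed=1.0,
    voice="af_heart",
)
sf.write("hello.wav", waveform, sample_rate)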
Modules/Generate_Video.py ADDED
@@ -0,0 +1,171 @@
+ from __future__ import annotations
+
+ import os
+ import random
+ import tempfile
+ from typing import Annotated
+
+ import gradio as gr
+ from huggingface_hub import InferenceClient
+
+ from app import _log_call_end, _log_call_start, _truncate_for_log
+
+ HF_VIDEO_TOKEN = os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")
+
+
+ def _write_video_tmp(data_iter_or_bytes: object, suffix: str = ".mp4") -> str:
+     fd, fname = tempfile.mkstemp(suffix=suffix)
+     try:
+         with os.fdopen(fd, "wb") as file:
+             if isinstance(data_iter_or_bytes, (bytes, bytearray)):
+                 file.write(data_iter_or_bytes)
+             elif hasattr(data_iter_or_bytes, "read"):
+                 file.write(data_iter_or_bytes.read())
+             elif hasattr(data_iter_or_bytes, "content"):
+                 file.write(data_iter_or_bytes.content)  # type: ignore[attr-defined]
+             elif hasattr(data_iter_or_bytes, "__iter__") and not isinstance(data_iter_or_bytes, (str, dict)):
+                 for chunk in data_iter_or_bytes:  # type: ignore[assignment]
+                     if chunk:
+                         file.write(chunk)
+             else:
+                 raise gr.Error("Unsupported video data type returned by provider.")
+     except Exception:
+         try:
+             os.remove(fname)
+         except Exception:
+             pass
+         raise
+     return fname
+
+
+ def Generate_Video(
+     prompt: Annotated[str, "Text description of the video to generate (e.g., 'a red fox running through a snowy forest at sunrise')."],
+     model_id: Annotated[str, "Hugging Face model id in the form 'creator/model-name'. Defaults to Wan-AI/Wan2.2-T2V-A14B."] = "Wan-AI/Wan2.2-T2V-A14B",
+     negative_prompt: Annotated[str, "What should NOT appear in the video."] = "",
+     steps: Annotated[int, "Number of denoising steps (1–100). Higher can improve quality but is slower."] = 25,
+     cfg_scale: Annotated[float, "Guidance scale (1–20). Higher = follow the prompt more closely, lower = more creative."] = 3.5,
+     seed: Annotated[int, "Random seed for reproducibility. Use -1 for a random seed per call."] = -1,
+     width: Annotated[int, "Output width in pixels (multiples of 8 recommended)."] = 768,
+     height: Annotated[int, "Output height in pixels (multiples of 8 recommended)."] = 768,
+     fps: Annotated[int, "Frames per second of the output video (e.g., 24)."] = 24,
+     duration: Annotated[float, "Target duration in seconds (provider/model dependent, commonly 2–6s)."] = 4.0,
+ ) -> str:
+     _log_call_start(
+         "Generate_Video",
+         prompt=_truncate_for_log(prompt, 160),
+         model_id=model_id,
+         steps=steps,
+         cfg_scale=cfg_scale,
+         fps=fps,
+         duration=duration,
+         size=f"{width}x{height}",
+     )
+     if not prompt or not prompt.strip():
+         _log_call_end("Generate_Video", "error=empty prompt")
+         raise gr.Error("Please provide a non-empty prompt.")
+     providers = ["auto", "replicate", "fal-ai"]
+     last_error: Exception | None = None
+     parameters = {
+         "negative_prompt": negative_prompt or None,
+         "num_inference_steps": steps,
+         "guidance_scale": cfg_scale,
+         "seed": seed if seed != -1 else random.randint(1, 1_000_000_000),
+         "width": width,
+         "height": height,
+         "fps": fps,
+         "duration": duration,
+     }
+     for provider in providers:
+         try:
+             client = InferenceClient(api_key=HF_VIDEO_TOKEN, provider=provider)
+             if hasattr(client, "text_to_video"):
+                 num_frames = int(duration * fps) if duration and fps else None
+                 extra_body = {}
+                 if width:
+                     extra_body["width"] = width
+                 if height:
+                     extra_body["height"] = height
+                 if fps:
+                     extra_body["fps"] = fps
+                 if duration:
+                     extra_body["duration"] = duration
+                 result = client.text_to_video(
+                     prompt=prompt,
+                     model=model_id,
+                     guidance_scale=cfg_scale,
+                     negative_prompt=[negative_prompt] if negative_prompt else None,
+                     num_frames=num_frames,
+                     num_inference_steps=steps,
+                     seed=parameters["seed"],
+                     extra_body=extra_body if extra_body else None,
+                 )
+             else:
+                 result = client.post(
+                     model=model_id,
+                     json={"inputs": prompt, "parameters": {k: v for k, v in parameters.items() if v is not None}},
+                 )
+             path = _write_video_tmp(result, suffix=".mp4")
+             try:
+                 size = os.path.getsize(path)
+             except Exception:
+                 size = -1
+             _log_call_end("Generate_Video", f"provider={provider} path={os.path.basename(path)} bytes={size}")
+             return path
+         except Exception as exc:  # pylint: disable=broad-except
+             last_error = exc
+             continue
+     msg = str(last_error) if last_error else "Unknown error"
+     lowered = msg.lower()
+     if "404" in msg:
+         raise gr.Error(f"Model not found or unavailable: {model_id}. Check the id and HF token access.")
+     if "503" in msg:
+         raise gr.Error("The model is warming up. Please try again shortly.")
+     if "401" in msg or "403" in msg:
+         raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
+     if ("api_key" in lowered) or ("hf auth login" in lowered) or ("unauthorized" in lowered) or ("forbidden" in lowered):
+         raise gr.Error("Please duplicate the space and provide a `HF_READ_TOKEN` to enable Image and Video Generation.")
+     _log_call_end("Generate_Video", f"error={_truncate_for_log(msg, 200)}")
+     raise gr.Error(f"Video generation failed: {msg}")
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Generate_Video,
+         inputs=[
+             gr.Textbox(label="Prompt", placeholder="Enter a prompt for the video", lines=2),
+             gr.Textbox(
+                 label="Model",
+                 value="Wan-AI/Wan2.2-T2V-A14B",
+                 placeholder="creator/model-name",
+                 max_lines=1,
+                 info="<a href=\"https://huggingface.co/models?pipeline_tag=text-to-video&inference_provider=nebius,cerebras,novita,fireworks-ai,together,fal-ai,groq,featherless-ai,nscale,hyperbolic,sambanova,cohere,replicate,scaleway,publicai,hf-inference&sort=trending\" target=\"_blank\" rel=\"noopener noreferrer\">Browse models</a>",
+             ),
+             gr.Textbox(label="Negative Prompt", value="", lines=2),
+             gr.Slider(minimum=1, maximum=100, value=25, step=1, label="Steps"),
+             gr.Slider(minimum=1.0, maximum=20.0, value=3.5, step=0.1, label="CFG Scale"),
+             gr.Slider(minimum=-1, maximum=1_000_000_000, value=-1, step=1, label="Seed (-1 = random)"),
+             gr.Slider(minimum=64, maximum=1920, value=768, step=8, label="Width"),
+             gr.Slider(minimum=64, maximum=1920, value=768, step=8, label="Height"),
+             gr.Slider(minimum=4, maximum=60, value=24, step=1, label="FPS"),
+             gr.Slider(minimum=1.0, maximum=10.0, value=4.0, step=0.5, label="Duration (s)"),
+         ],
+         outputs=gr.Video(label="Generated Video", show_download_button=True, format="mp4"),
+         title="Generate Video",
+         description=(
+             "<div style=\"text-align:center\">Generate short videos via Hugging Face serverless inference. "
+             "Default model is Wan2.2-T2V-A14B.</div>"
+         ),
+         api_description=(
+             "Generate a short video from a text prompt using a Hugging Face model via serverless inference. "
+             "Create dynamic scenes like 'a red fox running through a snowy forest at sunrise', 'waves crashing on a rocky shore', "
+             "'time-lapse of clouds moving across a blue sky'. Default model: Wan2.2-T2V-A14B (2-6 second videos). "
+             "Parameters: prompt (str), model_id (str), negative_prompt (str), steps (int), cfg_scale (float), seed (int), "
+             "width/height (int), fps (int), duration (float in seconds). Returns MP4 file path. "
+             "Return the generated media to the user in this format `![Alt text](URL)`"
+         ),
+         flagging_mode="never",
+         show_api=bool(os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")),
+     )
+
+
+ __all__ = ["Generate_Video", "build_interface"]
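
And for video, where the function returns a temporary MP4 path rather than an in-memory object (same repo and token assumptions as above):

from Modules.Generate_Video import Generate_Video

# Returns a filesystem path to a temporary .mp4 written by _write_video_tmp.
video_path = Generate_Video(
    prompt="a red fox running through a snowy forest at sunrise",
    steps=25,
    cfg_scale=3.5,
    fps=24,
    duration=4.0,
)
print(f"video written to {video_path}")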
Modules/Memory_Manager.py ADDED
@@ -0,0 +1,247 @@
+ from __future__ import annotations
+
+ import json
+ import os
+ import threading
+ import uuid
+ from datetime import datetime
+ from typing import Annotated, Dict, List, Literal, Optional
+
+ import gradio as gr
+
+ MEMORY_FILE = os.path.join(os.path.dirname(__file__), "memories.json")
+ _MEMORY_LOCK = threading.RLock()
+ _MAX_MEMORIES = 10_000
+
+
+ def _now_iso() -> str:
+     return datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
+
+
+ def _load_memories() -> List[Dict[str, str]]:
+     if not os.path.exists(MEMORY_FILE):
+         return []
+     try:
+         with open(MEMORY_FILE, "r", encoding="utf-8") as file:
+             data = json.load(file)
+         if isinstance(data, list):
+             cleaned: List[Dict[str, str]] = []
+             for item in data:
+                 if isinstance(item, dict) and "id" in item and "text" in item:
+                     cleaned.append(item)
+             return cleaned
+         return []
+     except Exception:
+         try:
+             backup = MEMORY_FILE + ".corrupt"
+             if not os.path.exists(backup):
+                 os.replace(MEMORY_FILE, backup)
+         except Exception:
+             pass
+         return []
+
+
+ def _save_memories(memories: List[Dict[str, str]]) -> None:
+     tmp_path = MEMORY_FILE + ".tmp"
+     with open(tmp_path, "w", encoding="utf-8") as file:
+         json.dump(memories, file, ensure_ascii=False, indent=2)
+     os.replace(tmp_path, MEMORY_FILE)
+
+
+ def _mem_save(text: str, tags: str) -> str:
+     text_clean = (text or "").strip()
+     if not text_clean:
+         return "Error: memory text is empty."
+     with _MEMORY_LOCK:
+         memories = _load_memories()
+         if memories and memories[-1].get("text") == text_clean:
+             return "Skipped: identical to last stored memory."
+         mem_id = str(uuid.uuid4())
+         entry = {
+             "id": mem_id,
+             "text": text_clean,
+             "timestamp": _now_iso(),
+             "tags": tags.strip(),
+         }
+         memories.append(entry)
+         if len(memories) > _MAX_MEMORIES:
+             overflow = len(memories) - _MAX_MEMORIES
+             memories = memories[overflow:]
+         _save_memories(memories)
+     return f"Memory saved: {mem_id}"
+
+
+ def _mem_list(limit: int, include_tags: bool) -> str:
+     limit = max(1, min(200, limit))
+     with _MEMORY_LOCK:
+         memories = _load_memories()
+     if not memories:
+         return "No memories stored yet."
+     chosen = memories[-limit:][::-1]
+     lines: List[str] = []
+     for memory in chosen:
+         base = f"{memory['id'][:8]} [{memory.get('timestamp','?')}] {memory.get('text','')}"
+         if include_tags and memory.get("tags"):
+             base += f" | tags: {memory['tags']}"
+         lines.append(base)
+     omitted = len(memories) - len(chosen)
+     if omitted > 0:
+         lines.append(f"… ({omitted} older {'memories' if omitted != 1 else 'memory'} omitted; total={len(memories)})")
+     return "\n".join(lines)
+
+
+ def _parse_search_query(query: str) -> Dict[str, List[str]]:
+     import re
+
+     result = {"tag_terms": [], "text_terms": [], "operator": "and"}
+     if not query or not query.strip():
+         return result
+     query = re.sub(r"\s+", " ", query.strip())
+     if re.search(r"\bOR\b", query, re.IGNORECASE):
+         result["operator"] = "or"
+         parts = re.split(r"\s+OR\s+", query, flags=re.IGNORECASE)
+     else:
+         parts = re.split(r"\s+(?:AND\s+)?", query, flags=re.IGNORECASE)
+         parts = [p for p in parts if p.strip() and p.strip().upper() != "AND"]
+     for part in parts:
+         part = part.strip()
+         if not part:
+             continue
+         tag_match = re.match(r"^tag:(.+)$", part, re.IGNORECASE)
+         if tag_match:
+             tag_name = tag_match.group(1).strip()
+             if tag_name:
+                 result["tag_terms"].append(tag_name.lower())
+         else:
+             result["text_terms"].append(part.lower())
+     return result
+
+
+ def _match_memory_with_query(memory: Dict[str, str], parsed_query: Dict[str, List[str]]) -> bool:
+     tag_terms = parsed_query["tag_terms"]
+     text_terms = parsed_query["text_terms"]
+     operator = parsed_query["operator"]
+     if not tag_terms and not text_terms:
+         return False
+     memory_text = memory.get("text", "").lower()
+     memory_tags = memory.get("tags", "").lower()
+     memory_tag_list = [tag.strip() for tag in memory_tags.split(",") if tag.strip()]
+     tag_matches = [any(tag_term in tag for tag in memory_tag_list) for tag_term in tag_terms]
+     combined_text = memory_text + " " + memory_tags
+     text_matches = [text_term in combined_text for text_term in text_terms]
+     all_matches = tag_matches + text_matches
+     if not all_matches:
+         return False
+     if operator == "or":
+         return any(all_matches)
+     return all(all_matches)
+
+
+ def _mem_search(query: str, limit: int) -> str:
+     q = (query or "").strip()
+     if not q:
+         return "Error: empty query."
+     parsed_query = _parse_search_query(q)
+     if not parsed_query["tag_terms"] and not parsed_query["text_terms"]:
+         return "Error: no valid search terms found."
+     limit = max(1, min(200, limit))
+     with _MEMORY_LOCK:
+         memories = _load_memories()
+     matches: List[Dict[str, str]] = []
+     total_matches = 0
+     for memory in reversed(memories):
+         if _match_memory_with_query(memory, parsed_query):
+             total_matches += 1
+             if len(matches) < limit:
+                 matches.append(memory)
+     if not matches:
+         return f"No matches for: {query}"
+     lines = [
+         f"{memory['id'][:8]} [{memory.get('timestamp','?')}] {memory.get('text','')}" + (f" | tags: {memory['tags']}" if memory.get('tags') else "")
+         for memory in matches
+     ]
+     omitted = total_matches - len(matches)
+     if omitted > 0:
+         lines.append(f"… ({omitted} additional match{'es' if omitted != 1 else ''} omitted; total_matches={total_matches})")
+     return "\n".join(lines)
+
+
+ def _mem_delete(memory_id: str) -> str:
+     key = (memory_id or "").strip().lower()
+     if len(key) < 4:
+         return "Error: supply at least 4 characters of the id."
+     with _MEMORY_LOCK:
+         memories = _load_memories()
+         matched = [memory for memory in memories if memory["id"].lower().startswith(key)]
+         if not matched:
+             return "Memory not found."
+         if len(matched) > 1 and key != matched[0]["id"].lower():
+             sample = ", ".join(memory["id"][:8] for memory in matched[:5])
+             more = "…" if len(matched) > 5 else ""
+             return f"Ambiguous prefix (matches {len(matched)} ids: {sample}{more}). Provide more characters."
+         target_id = matched[0]["id"]
+         memories = [memory for memory in memories if memory["id"] != target_id]
+         _save_memories(memories)
+     return f"Deleted memory: {target_id}"
+
+
+ def Memory_Manager(
+     action: Annotated[Literal["save", "list", "search", "delete"], "Action to perform: save | list | search | delete"],
+     text: Annotated[Optional[str], "Text content (Save only)"] = None,
+     tags: Annotated[Optional[str], "Comma-separated tags (Save only)"] = None,
+     query: Annotated[Optional[str], "Enhanced search with tag:name syntax, AND/OR operators (Search only)"] = None,
+     limit: Annotated[int, "Max results (List/Search only)"] = 20,
+     memory_id: Annotated[Optional[str], "Full UUID or unique prefix (Delete only)"] = None,
+     include_tags: Annotated[bool, "Include tags (List/Search only)"] = True,
+ ) -> str:
+     act = (action or "").lower().strip()
+     text = text or ""
+     tags = tags or ""
+     query = query or ""
+     memory_id = memory_id or ""
+     if act == "save":
+         if not text.strip():
+             return "Error: 'text' is required when action=save."
+         return _mem_save(text=text, tags=tags)
+     if act == "list":
+         return _mem_list(limit=limit, include_tags=include_tags)
+     if act == "search":
+         if not query.strip():
+             return "Error: 'query' is required when action=search."
+         return _mem_search(query=query, limit=limit)
+     if act == "delete":
+         if not memory_id.strip():
+             return "Error: 'memory_id' is required when action=delete."
+         return _mem_delete(memory_id=memory_id)
+     return "Error: invalid action (use save|list|search|delete)."
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Memory_Manager,
+         inputs=[
+             gr.Dropdown(label="Action", choices=["save", "list", "search", "delete"], value="list"),
+             gr.Textbox(label="Text", lines=3, placeholder="Memory text (save)"),
+             gr.Textbox(label="Tags", placeholder="tag1, tag2", max_lines=1),
+             gr.Textbox(label="Query", placeholder="tag:work AND tag:project OR meeting", max_lines=1),
+             gr.Slider(1, 200, value=20, step=1, label="Limit"),
+             gr.Textbox(label="Memory ID / Prefix", placeholder="UUID or prefix (delete)", max_lines=1),
+             gr.Checkbox(value=True, label="Include Tags"),
+         ],
+         outputs=gr.Textbox(label="Result", lines=14),
+         title="Memory Manager",
+         description=(
+             "<div style=\"text-align:center\">Lightweight local JSON memory store (no external DB). Choose an Action, fill only the relevant fields, and run.</div>"
+         ),
+         api_description=(
+             "Manage short text memories with optional tags. Actions: save(text,tags), list(limit,include_tags), "
+             "search(query,limit,include_tags), delete(memory_id). Enhanced search supports tag:name queries and AND/OR operators. "
+             "Examples: 'tag:work', 'tag:work AND tag:project', 'meeting tag:work', 'tag:urgent OR important'. "
+             "Action parameter is always required. Use Memory_Manager whenever you are given information worth remembering about the user, "
+             "and search for memories when relevant."
+         ),
+         flagging_mode="never",
+     )
+
+
+ __all__ = ["Memory_Manager", "build_interface", "_load_memories", "_save_memories"]
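
Memory_Manager is the one module with no external service dependencies (a JSON file plus the stdlib), so a sketch like this should run as-is inside the repo; the id prefix in the delete call is hypothetical:

from Modules.Memory_Manager import Memory_Manager

print(Memory_Manager(action="save", text="User prefers dark mode", tags="preferences, ui"))
print(Memory_Manager(action="list", limit=5))
print(Memory_Manager(action="search", query="tag:preferences AND dark", limit=10))
# Delete accepts a full UUID or a unique prefix of at least 4 characters.
print(Memory_Manager(action="delete", memory_id="1234abcd"))  # hypothetical prefix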
Modules/Web_Fetch.py ADDED
@@ -0,0 +1,280 @@
1
+ from __future__ import annotations
2
+
3
+ import re
4
+ from typing import Annotated, Dict, List, Tuple
5
+ from urllib.parse import urlparse, urljoin
6
+
7
+ import gradio as gr
8
+ import requests
9
+ from bs4 import BeautifulSoup
10
+ from markdownify import markdownify as md
11
+ from readability import Document
12
+
13
+ from app import _fetch_rate_limiter, _log_call_end, _log_call_start, _truncate_for_log
14
+
15
+
16
+ def _http_get_enhanced(url: str, timeout: int | float = 30, *, skip_rate_limit: bool = False) -> requests.Response:
17
+ headers = {
18
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
19
+ "Accept-Language": "en-US,en;q=0.9",
20
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
21
+ "Accept-Encoding": "gzip, deflate, br",
22
+ "DNT": "1",
23
+ "Connection": "keep-alive",
24
+ "Upgrade-Insecure-Requests": "1",
25
+ }
26
+ if not skip_rate_limit:
27
+ _fetch_rate_limiter.acquire()
28
+ try:
29
+ response = requests.get(
30
+ url,
31
+ headers=headers,
32
+ timeout=timeout,
33
+ allow_redirects=True,
34
+ stream=False,
35
+ )
36
+ response.raise_for_status()
37
+ return response
38
+ except requests.exceptions.Timeout as exc:
39
+ raise requests.exceptions.RequestException("Request timed out. The webpage took too long to respond.") from exc
40
+ except requests.exceptions.ConnectionError as exc:
41
+ raise requests.exceptions.RequestException("Connection error. Please check the URL and your internet connection.") from exc
42
+ except requests.exceptions.HTTPError as exc:
43
+ if response.status_code == 403:
44
+ raise requests.exceptions.RequestException("Access forbidden. The website may be blocking automated requests.") from exc
45
+ if response.status_code == 404:
46
+ raise requests.exceptions.RequestException("Page not found. Please check the URL.") from exc
47
+ if response.status_code == 429:
48
+ raise requests.exceptions.RequestException("Rate limited. Please try again in a few minutes.") from exc
49
+ raise requests.exceptions.RequestException(f"HTTP error {response.status_code}: {exc}") from exc
50
+
51
+
52
+ def _normalize_whitespace(text: str) -> str:
53
+ text = re.sub(r"[ \t\u00A0]+", " ", text)
54
+ text = re.sub(r"\n\s*\n\s*\n+", "\n\n", text.strip())
55
+ return text.strip()
56
+
57
+
58
+ def _truncate(text: str, max_chars: int) -> Tuple[str, bool]:
59
+ if max_chars is None or max_chars <= 0 or len(text) <= max_chars:
60
+ return text, False
61
+ return text[:max_chars].rstrip() + " …", True
62
+
63
+
64
+ def _shorten(text: str, limit: int) -> str:
65
+ if limit <= 0 or len(text) <= limit:
66
+ return text
67
+ return text[: max(0, limit - 1)].rstrip() + "…"
68
+
69
+
70
+ def _domain_of(url: str) -> str:
71
+ try:
72
+ return urlparse(url).netloc or ""
73
+ except Exception:
74
+ return ""
75
+
76
+
+ def _extract_links_from_soup(soup: BeautifulSoup, base_url: str) -> str:
+     links = []
+     for link in soup.find_all("a", href=True):
+         href = link.get("href")
+         text = link.get_text(strip=True)
+         if href.startswith("//"):
+             full_url = "https:" + href
+         else:
+             # urljoin returns absolute hrefs unchanged and resolves relative ones against base_url
+             full_url = urljoin(base_url, href)
+         if text and href not in ["#", "javascript:void(0)"]:
+             links.append(f"- [{text}]({full_url})")
+     if not links:
+         return "No links found on this page."
+     title = soup.find("title")
+     title_text = title.get_text(strip=True) if title else "Links from webpage"
+     return f"# {title_text}\n\n" + "\n".join(links)
+
+
+ def _fullpage_markdown_from_soup(full_soup: BeautifulSoup, base_url: str, strip_selectors: str = "") -> str:
+     if strip_selectors:
+         selectors = [s.strip() for s in strip_selectors.split(",") if s.strip()]
+         for selector in selectors:
+             try:
+                 for element in full_soup.select(selector):
+                     element.decompose()
+             except Exception:
+                 continue
+     for element in full_soup.select("script, style, nav, footer, header, aside"):
+         element.decompose()
+     main = (
+         full_soup.find("main")
+         or full_soup.find("article")
+         or full_soup.find("div", class_=re.compile(r"content|main|post|article", re.I))
+         or full_soup.find("body")
+     )
+     if not main:
+         return "No main content found on the webpage."
+     markdown_text = md(str(main), heading_style="ATX")
+     markdown_text = re.sub(r"\n{3,}", "\n\n", markdown_text)
+     markdown_text = re.sub(r"\[\s*\]\([^)]*\)", "", markdown_text)
+     markdown_text = re.sub(r"[ \t]+", " ", markdown_text)
+     markdown_text = markdown_text.strip()
+     title = full_soup.find("title")
+     if title and title.get_text(strip=True):
+         markdown_text = f"# {title.get_text(strip=True)}\n\n{markdown_text}"
+     return markdown_text or "No content could be extracted."
+
+
+ def _truncate_markdown(markdown: str, max_chars: int) -> Tuple[str, Dict[str, object]]:
+     total_chars = len(markdown)
+     if total_chars <= max_chars:
+         return markdown, {
+             "truncated": False,
+             "returned_chars": total_chars,
+             "total_chars_estimate": total_chars,
+             "next_cursor": None,
+         }
+     truncated = markdown[:max_chars]
+     last_paragraph = truncated.rfind("\n\n")
+     if last_paragraph > max_chars * 0.7:
+         truncated = truncated[:last_paragraph]
+         cursor_pos = last_paragraph
+     elif "." in truncated[-100:]:
+         last_period = truncated.rfind(".")
+         if last_period > max_chars * 0.8:
+             truncated = truncated[: last_period + 1]
+             cursor_pos = last_period + 1
+         else:
+             cursor_pos = len(truncated)
+     else:
+         cursor_pos = len(truncated)
+     metadata = {
+         "truncated": True,
+         "returned_chars": len(truncated),
+         "total_chars_estimate": total_chars,
+         "next_cursor": cursor_pos,
+     }
+     truncated = truncated.rstrip()
+     truncation_notice = (
+         "\n\n---\n"
+         f"**Content Truncated:** Showing {metadata['returned_chars']:,} of {metadata['total_chars_estimate']:,} characters "
+         f"({(metadata['returned_chars']/metadata['total_chars_estimate']*100):.1f}%)\n"
+         f"**Next cursor:** {metadata['next_cursor']} (use this value with offset parameter for continuation)\n"
+         "---"
+     )
+     return truncated + truncation_notice, metadata
+
+
+ def Web_Fetch(
+     url: Annotated[str, "The absolute URL to fetch (must return HTML)."],
+     max_chars: Annotated[int, "Maximum characters to return (0 = no limit, full page content)."] = 3000,
+     strip_selectors: Annotated[str, "CSS selectors to remove (comma-separated, e.g., '.header, .footer, nav')."] = "",
+     url_scraper: Annotated[bool, "Extract only links from the page instead of content."] = False,
+     offset: Annotated[int, "Character offset to start from (for pagination, use next_cursor from previous call)."] = 0,
+ ) -> str:
+     _log_call_start(
+         "Web_Fetch",
+         url=url,
+         max_chars=max_chars,
+         strip_selectors=strip_selectors,
+         url_scraper=url_scraper,
+         offset=offset,
+     )
+     if not url or not url.strip():
+         result = "Please enter a valid URL."
+         _log_call_end("Web_Fetch", _truncate_for_log(result))
+         return result
+     try:
+         resp = _http_get_enhanced(url)  # already raises for HTTP errors, so no extra raise_for_status is needed
+     except requests.exceptions.RequestException as exc:
+         result = f"An error occurred: {exc}"
+         _log_call_end("Web_Fetch", _truncate_for_log(result))
+         return result
+     final_url = str(resp.url)
+     ctype = resp.headers.get("Content-Type", "")
+     if "html" not in ctype.lower():
+         result = f"Unsupported content type for extraction: {ctype or 'unknown'}"
+         _log_call_end("Web_Fetch", _truncate_for_log(result))
+         return result
+     resp.encoding = resp.encoding or resp.apparent_encoding
+     html = resp.text
+     full_soup = BeautifulSoup(html, "lxml")
+     if url_scraper:
+         result = _extract_links_from_soup(full_soup, final_url)
+         if offset > 0:
+             result = result[offset:]
+         if max_chars > 0 and len(result) > max_chars:
+             result, _ = _truncate_markdown(result, max_chars)
+     else:
+         full_result = _fullpage_markdown_from_soup(full_soup, final_url, strip_selectors)
+         if offset > 0:
+             if offset >= len(full_result):
+                 result = (
+                     f"Offset {offset} exceeds content length ({len(full_result)} characters). "
+                     f"Content ends at position {len(full_result)}."
+                 )
+                 _log_call_end("Web_Fetch", _truncate_for_log(result))
+                 return result
+             result = full_result[offset:]
+         else:
+             result = full_result
+         if max_chars > 0 and len(result) > max_chars:
+             result, metadata = _truncate_markdown(result, max_chars)
+             if offset > 0 and metadata["next_cursor"] is not None:
+                 # Rebase the cursor onto the full document so the advertised continuation
+                 # offset is absolute, and patch the rendered notice to match.
+                 absolute_cursor = offset + metadata["next_cursor"]
+                 result = result.replace(
+                     f"**Next cursor:** {metadata['next_cursor']}",
+                     f"**Next cursor:** {absolute_cursor}",
+                 )
+                 metadata["next_cursor"] = absolute_cursor
+                 metadata["total_chars_estimate"] = len(full_result)
+     _log_call_end("Web_Fetch", f"chars={len(result)}, url_scraper={url_scraper}, offset={offset}")
+     return result
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Web_Fetch,
+         inputs=[
+             gr.Textbox(label="URL", placeholder="https://example.com/article", max_lines=1),
+             gr.Slider(minimum=0, maximum=20000, value=3000, step=100, label="Max Characters", info="0 = no limit (full page), default 3000"),
+             gr.Textbox(
+                 label="Strip Selectors",
+                 placeholder=".header, .footer, nav, .sidebar",
+                 value="",
+                 max_lines=1,
+                 info="CSS selectors to remove (comma-separated)",
+             ),
+             gr.Checkbox(label="URL Scraper", value=False, info="Extract only links instead of content"),
+             gr.Slider(
+                 minimum=0,
+                 maximum=100000,
+                 value=0,
+                 step=100,
+                 label="Offset",
+                 info="Character offset to start from (use next_cursor from previous call for pagination)",
+             ),
+         ],
+         outputs=gr.Markdown(label="Extracted Content"),
+         title="Web Fetch",
+         description=(
+             "<div style=\"text-align:center\">Convert any webpage to clean Markdown format with precision controls, "
+             "or extract all links. Supports custom element removal, length limits, and pagination with offset.</div>"
+         ),
+         api_description=(
+             "Fetch a web page and return it converted to Markdown format or extract links with configurable options. "
+             "Includes enhanced truncation with detailed metadata and pagination support via offset parameter. "
+             "Parameters: url (str - absolute URL), max_chars (int - 0=no limit, default 3000), "
+             "strip_selectors (str - CSS selectors to remove, comma-separated), "
+             "url_scraper (bool - extract only links instead of content, default False), "
+             "offset (int - character offset for pagination, use next_cursor from previous call). "
+             "When content is truncated, returns detailed metadata including truncated status, character counts, "
+             "and next_cursor for continuation. When url_scraper=True, returns formatted list of all links found on the page."
+         ),
+         flagging_mode="never",
+     )
+
+
+ __all__ = [
+     "Web_Fetch",
+     "build_interface",
+     "_http_get_enhanced",
+     "_fullpage_markdown_from_soup",
+ ]
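
For reference, a minimal sketch of how the Web_Fetch pagination contract could be exercised from Python (the URL and the cursor value 2847 are illustrative; the real cursor comes from the truncation notice of the previous call):

    from Modules.Web_Fetch import Web_Fetch

    # First slice: up to 3000 characters of Markdown plus a truncation notice.
    first = Web_Fetch("https://example.com/article", max_chars=3000)

    # If the notice reports e.g. "Next cursor: 2847", resume from that offset.
    second = Web_Fetch("https://example.com/article", max_chars=3000, offset=2847)

    # Link-scraper mode returns a Markdown list of links instead of page content.
    links = Web_Fetch("https://example.com/article", url_scraper=True, max_chars=0)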
Modules/Web_Search.py ADDED
@@ -0,0 +1,268 @@
+ from __future__ import annotations
+
+ import re
+ from typing import Annotated, List
+
+ import gradio as gr
+ from ddgs import DDGS
+
+ from app import _log_call_end, _log_call_start, _search_rate_limiter, _truncate_for_log
+
+
+ def _extract_date_from_snippet(snippet: str) -> str:
+     if not snippet:
+         return ""
+
+     date_patterns = [
+         r"\b(\d{4}[-/]\d{1,2}[-/]\d{1,2})\b",
+         r"\b([A-Za-z]{3,9}\s+\d{1,2},?\s+\d{4})\b",
+         r"\b(\d{1,2}\s+[A-Za-z]{3,9}\s+\d{4})\b",
+         r"\b(\d+\s+(?:day|week|month|year)s?\s+ago)\b",
+         r"(?:Published|Updated|Posted):\s*([^,\n]+?)(?:[,\n]|$)",
+     ]
+     for pattern in date_patterns:
+         matches = re.findall(pattern, snippet, re.IGNORECASE)
+         if matches:
+             return matches[0].strip()
+     return ""
+
+
+ def _format_search_result(result: dict, search_type: str, index: int) -> List[str]:
+     lines: List[str] = []
+     if search_type == "text":
+         title = result.get("title", "").strip()
+         url = result.get("href", "").strip()
+         snippet = result.get("body", "").strip()
+         date = _extract_date_from_snippet(snippet)
+         lines.append(f"{index}. {title}")
+         lines.append(f" URL: {url}")
+         if snippet:
+             lines.append(f" Summary: {snippet}")
+         if date:
+             lines.append(f" Date: {date}")
+     elif search_type == "news":
+         title = result.get("title", "").strip()
+         url = result.get("url", "").strip()
+         body = result.get("body", "").strip()
+         date = result.get("date", "").strip()
+         source = result.get("source", "").strip()
+         lines.append(f"{index}. {title}")
+         lines.append(f" URL: {url}")
+         if source:
+             lines.append(f" Source: {source}")
+         if date:
+             lines.append(f" Date: {date}")
+         if body:
+             lines.append(f" Summary: {body}")
+     elif search_type == "images":
+         title = result.get("title", "").strip()
+         image_url = result.get("image", "").strip()
+         source_url = result.get("url", "").strip()
+         source = result.get("source", "").strip()
+         width = result.get("width", "")
+         height = result.get("height", "")
+         lines.append(f"{index}. {title}")
+         lines.append(f" Image: {image_url}")
+         lines.append(f" Source: {source_url}")
+         if source:
+             lines.append(f" Publisher: {source}")
+         if width and height:
+             lines.append(f" Dimensions: {width}x{height}")
+     elif search_type == "videos":
+         title = result.get("title", "").strip()
+         description = result.get("description", "").strip()
+         duration = result.get("duration", "").strip()
+         published = result.get("published", "").strip()
+         uploader = result.get("uploader", "").strip()
+         embed_url = result.get("embed_url", "").strip()
+         lines.append(f"{index}. {title}")
+         if embed_url:
+             lines.append(f" Video: {embed_url}")
+         if uploader:
+             lines.append(f" Uploader: {uploader}")
+         if duration:
+             lines.append(f" Duration: {duration}")
+         if published:
+             lines.append(f" Published: {published}")
+         if description:
+             lines.append(f" Description: {description}")
+     elif search_type == "books":
+         title = result.get("title", "").strip()
+         url = result.get("url", "").strip()
+         body = result.get("body", "").strip()
+         lines.append(f"{index}. {title}")
+         lines.append(f" URL: {url}")
+         if body:
+             lines.append(f" Description: {body}")
+     return lines
+
+
+ def Web_Search(
+     query: Annotated[str, "The search query (supports operators like site:, quotes, OR)."],
+     max_results: Annotated[int, "Number of results to return (1–20)."] = 5,
+     page: Annotated[int, "Page number for pagination (1-based, each page contains max_results items)."] = 1,
+     search_type: Annotated[str, "Type of search: 'text' (web pages), 'news', 'images', 'videos', or 'books'."] = "text",
+     offset: Annotated[int, "Result offset to start from (overrides page if > 0, for precise continuation)."] = 0,
+ ) -> str:
+     _log_call_start("Web_Search", query=query, max_results=max_results, page=page, search_type=search_type, offset=offset)
+     if not query or not query.strip():
+         result = "No search query provided. Please enter a search term."
+         _log_call_end("Web_Search", _truncate_for_log(result))
+         return result
+     max_results = max(1, min(20, max_results))
+     page = max(1, page)
+     offset = max(0, offset)
+     valid_types = ["text", "news", "images", "videos", "books"]
+     if search_type not in valid_types:
+         search_type = "text"
+     if offset > 0:
+         actual_offset = offset
+         calculated_page = (offset // max_results) + 1
+     else:
+         actual_offset = (page - 1) * max_results
+         calculated_page = page
+     total_needed = actual_offset + max_results
+     used_fallback = False
+     original_search_type = search_type
+
+     def _perform_search(stype: str) -> list[dict]:
+         try:
+             _search_rate_limiter.acquire()
+             with DDGS() as ddgs:
+                 if stype == "text":
+                     raw_gen = ddgs.text(query, max_results=total_needed + 10)
+                 elif stype == "news":
+                     raw_gen = ddgs.news(query, max_results=total_needed + 10)
+                 elif stype == "images":
+                     raw_gen = ddgs.images(query, max_results=total_needed + 10)
+                 elif stype == "videos":
+                     raw_gen = ddgs.videos(query, max_results=total_needed + 10)
+                 else:
+                     raw_gen = ddgs.books(query, max_results=total_needed + 10)
+                 try:
+                     return list(raw_gen)
+                 except Exception as inner_exc:
+                     if "no results" in str(inner_exc).lower() or "not found" in str(inner_exc).lower():
+                         return []
+                     raise
+         except Exception as exc:
+             error_msg = f"Search failed: {str(exc)[:200]}"
+             lowered = str(exc).lower()
+             if "blocked" in lowered or "rate" in lowered:
+                 error_msg = "Search temporarily blocked due to rate limiting. Please try again in a few minutes."
+             elif "timeout" in lowered:
+                 error_msg = "Search timed out. Please try again with a simpler query."
+             elif "network" in lowered or "connection" in lowered:
+                 error_msg = "Network connection error. Please check your internet connection and try again."
+             elif "no results" in lowered or "not found" in lowered:
+                 return []
+             raise RuntimeError(error_msg) from exc
+
+     try:
+         raw = _perform_search(search_type)
+     except Exception as exc:
+         result = f"Error: {exc}"
+         _log_call_end("Web_Search", _truncate_for_log(result))
+         return result
+
+     if not raw and search_type == "news":
+         try:
+             raw = _perform_search("text")
+             if raw:
+                 used_fallback = True
+                 search_type = "text"
+         except Exception:
+             pass
+
+     if not raw:
+         # The text fallback is attempted for every empty news search, so report it whenever the original type was news.
+         fallback_note = " (also tried 'text' search as fallback)" if original_search_type == "news" else ""
+         result = f"No {original_search_type} results found for query: {query}{fallback_note}"
+         _log_call_end("Web_Search", _truncate_for_log(result))
+         return result
+
+     paginated_results = raw[actual_offset: actual_offset + max_results]
+     if not paginated_results:
+         if actual_offset >= len(raw):
+             result = f"Offset {actual_offset} exceeds available results ({len(raw)} total). Try offset=0 to start from beginning."
+         else:
+             result = f"No {original_search_type} results found on page {calculated_page} for query: {query}. Try page 1 or reduce page number."
+         _log_call_end("Web_Search", _truncate_for_log(result))
+         return result
+
+     total_available = len(raw)
+     start_num = actual_offset + 1
+     end_num = actual_offset + len(paginated_results)
+     next_offset = actual_offset + len(paginated_results)
+     search_label = original_search_type.title()
+     if used_fallback:
+         search_label += " → Text (Smart Fallback)"
+     pagination_info = f"Page {calculated_page}"
+     if offset > 0:
+         pagination_info = f"Offset {actual_offset} (≈ {pagination_info})"
+     lines = [f"{search_label} search results for: {query}"]
+     if used_fallback:
+         lines.append("📍 Note: News search returned no results, automatically searched general web content instead")
+     lines.append(f"{pagination_info} (results {start_num}-{end_num} of ~{total_available}+ available)\n")
+     for i, item in enumerate(paginated_results, start_num):
+         lines.extend(_format_search_result(item, search_type, i))
+         lines.append("")
+     if total_available > end_num:
+         lines.append("💡 More results available:")
+         lines.append(f" • Next page: page={calculated_page + 1}")
+         lines.append(f" • Next offset: offset={next_offset}")
+         lines.append(f" • Use offset={next_offset} to continue exactly from result {next_offset + 1}")
+     result = "\n".join(lines)
+     search_info = f"type={original_search_type}"
+     if used_fallback:
+         search_info += "→text"
+     _log_call_end("Web_Search", f"{search_info} page={calculated_page} offset={actual_offset} results={len(paginated_results)} chars={len(result)}")
+     return result
+
+
+ def build_interface() -> gr.Interface:
+     return gr.Interface(
+         fn=Web_Search,
+         inputs=[
+             gr.Textbox(label="Query", placeholder="topic OR site:example.com", max_lines=1),
+             gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Max results"),
+             gr.Slider(minimum=1, maximum=10, value=1, step=1, label="Page", info="Page number for pagination (ignored if offset > 0)"),
+             gr.Radio(
+                 label="Search Type",
+                 choices=["text", "news", "images", "videos", "books"],
+                 value="text",
+                 info="Type of content to search for",
+             ),
+             gr.Slider(
+                 minimum=0,
+                 maximum=1000,
+                 value=0,
+                 step=1,
+                 label="Offset",
+                 info="Result offset to start from (overrides page if > 0, use next_offset from previous search)",
+             ),
+         ],
+         outputs=gr.Textbox(label="Search Results", interactive=False, lines=20, max_lines=20),
+         title="Web Search",
+         description=(
+             "<div style=\"text-align:center\">Multi-type web search with readable output format, date detection, and flexible pagination. "
+             "Supports text, news, images, videos, and books. Features smart fallback for news searches and precise offset control.</div>"
+         ),
+         api_description=(
+             "Run a web search (DuckDuckGo backend) with support for multiple content types and return formatted results. "
+             "Features smart fallback: if 'news' search returns no results, automatically retries with 'text' search "
+             "to catch sources like Hacker News that might not appear in news-specific results. "
+             "Supports advanced search operators: site: for specific domains, quotes for exact phrases, "
+             "OR for alternatives, and - to exclude terms. Examples: 'Python programming', 'site:example.com', "
+             "'\"artificial intelligence\"', 'cats -dogs', 'Python OR JavaScript'. "
+             "Parameters: query (str), max_results (int, 1-20), page (int, 1-based pagination), "
+             "search_type (str: text/news/images/videos/books), offset (int, result offset for precise continuation). "
+             "If offset > 0, it overrides the page parameter. Returns appropriately formatted results with metadata, "
+             "pagination hints, and next_offset information for each content type."
+         ),
+         flagging_mode="never",
+         submit_btn="Search",
+     )
+
+
+ __all__ = ["Web_Search", "build_interface"]
Modules/__init__.py ADDED
@@ -0,0 +1 @@
+ """Gradio tool modules bundled for Nymbo-Tools."""