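"""Deep Research tool.

Runs up to five DuckDuckGo searches, fetches the resulting pages in parallel,
and asks a hosted LLM (via Hugging Face Inference Providers) to write a
long-form research report from the collected sources.
"""
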
from __future__ import annotations

import os
import re
import tempfile
import time
from collections import deque
from concurrent.futures import Future, ThreadPoolExecutor, as_completed
from typing import Annotated, Dict, List, Tuple
from urllib.parse import urlparse

import gradio as gr
import requests
from bs4 import BeautifulSoup
from ddgs import DDGS
from huggingface_hub import InferenceClient

from .Web_Fetch import _fullpage_markdown_from_soup, _http_get_enhanced
from app import _log_call_end, _log_call_start, _search_rate_limiter, _truncate_for_log

HF_TEXTGEN_TOKEN = os.getenv("HF_READ_TOKEN") or os.getenv("HF_TOKEN")


class SlowHost(Exception):
    """Raised when a page fetch times out, so the URL can be retried after a short delay."""


def _normalize_query(q: str) -> str:
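    """Normalize a search query: replace curly quotes and backticks, collapse
    whitespace, and strip stray surrounding quotes."""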
    if not q:
        return ""
    repl = {"“": '"', "”": '"', "‘": "'", "’": "'", "`": "'"}
    for key, value in repl.items():
        q = q.replace(key, value)
    q = re.sub(r"\s+", " ", q)
    q = re.sub(r'"\s+"', " ", q)
    q = q.strip().strip('"').strip()
    return q


def _search_urls_only(query: str, max_results: int) -> list[str]:
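    """Run a DuckDuckGo text search and return a de-duplicated list of result URLs
    (no titles or snippets). Returns an empty list on any error."""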
    if not query or not query.strip() or max_results <= 0:
        return []
    urls: list[str] = []
    try:
        _search_rate_limiter.acquire()
        with DDGS() as ddgs:
            for item in ddgs.text(query, region="wt-wt", safesearch="moderate", max_results=max_results):
                url = (item.get("href") or item.get("url") or "").strip()
                if url:
                    urls.append(url)
    except Exception:
        pass
    seen = set()
    deduped = []
    for url in urls:
        if url not in seen:
            seen.add(url)
            deduped.append(url)
    return deduped


def _fetch_page_markdown_fast(url: str, max_chars: int = 3000, timeout: float = 10.0) -> str:
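    """Fetch a URL and convert the HTML to Markdown, truncated to ``max_chars``.

    Raises ``SlowHost`` on timeouts so the caller can retry later; returns an
    empty string for non-HTML responses or other request errors."""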
    try:
        resp = _http_get_enhanced(url, timeout=timeout, skip_rate_limit=True)
        resp.raise_for_status()
    except requests.exceptions.RequestException as exc:
        msg = str(exc)
        if "timed out" in msg.lower():
            raise SlowHost(msg) from exc
        return ""
    final_url = str(resp.url)
    ctype = resp.headers.get("Content-Type", "")
    if "html" not in ctype.lower():
        return ""
    resp.encoding = resp.encoding or resp.apparent_encoding
    html = resp.text
    soup = BeautifulSoup(html, "lxml")
    md_text = _fullpage_markdown_from_soup(soup, final_url, "")
    if max_chars > 0 and len(md_text) > max_chars:
        md_text = md_text[:max_chars]
    return md_text


def _truncate_join(parts: List[str], max_chars: int) -> Tuple[str, bool]:
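    """Join text parts with blank lines, stopping once ``max_chars`` is reached.

    Returns the joined text and a flag indicating whether truncation occurred."""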
    out = []
    total = 0
    truncated = False
    for part in parts:
        if not part:
            continue
        if total + len(part) > max_chars:
            out.append(part[: max(0, max_chars - total)])
            truncated = True
            break
        out.append(part)
        total += len(part)
    return ("\n\n".join(out), truncated)


def _build_research_prompt(summary: str, queries: List[str], url_list: List[str], pages_map: Dict[str, str]) -> str:
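    """Assemble the researcher prompt: report-writing instructions, the user's
    topic summary, the search queries, an indexed source list, and the fetched
    documents (truncated to fit the context budget)."""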
    researcher_instructions = (
        "You are Nymbot, a helpful deep research assistant. You will be asked a Query from a user and you will create a long, comprehensive, well-structured research report in response to the user's Query.\n\n"
        "You have been provided with User Question, Search Queries, and numerous webpages that the searches yielded.\n\n"
        "<report_format>\n"
        "Write a well-formatted report in the structure of a scientific report to a broad audience. The report must be readable and have a nice flow of Markdown headers and paragraphs of text. Do NOT use bullet points or lists which break up the natural flow. The report must be exhaustive for comprehensive topics.\n"
        "For any given user query, first determine the major themes or areas that need investigation, then structure these as main sections, and develop detailed subsections that explore various facets of each theme. Each section and subsection requires paragraphs of texts that need to all connect into one narrative flow.\n"
        "</report_format>\n\n"
        "<document_structure>\n"
        "- Always begin with a clear title using a single # header\n"
        "- Organize content into major sections using ## headers\n"
        "- Further divide into subsections using ### headers\n"
        "- Use #### headers sparingly for special subsections\n"
        "- Never skip header levels\n"
        "- Write multiple paragraphs per section or subsection\n"
        "- Each paragraph must contain at least 4-5 sentences, present novel insights and analysis grounded in source material, connect ideas to original query, and build upon previous paragraphs to create a narrative flow\n"
        "- Never use lists, instead always use text or tables\n\n"
        "Mandatory Section Flow:\n"
        "1. Title (# level)\n   - Before writing the main report, start with one detailed paragraph summarizing key findings\n"
        "2. Main Body Sections (## level)\n   - Each major topic gets its own section (## level). There MUST BE at least 5 sections.\n   - Use ### subsections for detailed analysis\n   - Every section or subsection needs at least one paragraph of narrative before moving to the next section\n   - Do NOT have a section titled \"Main Body Sections\" and instead pick informative section names that convey the theme of the section\n"
        "3. Conclusion (## level)\n   - Synthesis of findings\n   - Potential recommendations or next steps\n"
        "</document_structure>\n\n"
        "<planning_rules>\n"
        "- Always break it down into multiple steps\n"
        "- Assess the different sources and whether they are useful for any steps needed to answer the query\n"
        "- Create the best report that weighs all the evidence from the sources\n"
        "- Remember that the current date is: Wednesday, April 23, 2025, 11:50 AM EDT\n"
        "- Make sure that your final report addresses all parts of the query\n"
        "- Communicate a brief high-level plan in the introduction; do not reveal chain-of-thought.\n"
        "- When referencing sources during analysis, you should still refer to them by index with brackets and follow <citations>\n"
        "- As a final step, review your planned report structure and ensure it completely answers the query.\n"
        "</planning_rules>\n\n"
    )
    sources_blocks: List[str] = []
    indexed_urls: List[str] = []
    for idx, url in enumerate(url_list, start=1):
        text = pages_map.get(url, "").strip()
        if not text:
            continue
        indexed_urls.append(f"[{idx}] {url}")
        sources_blocks.append(f"[Source {idx}] URL: {url}\n\n{text}")
    sources_joined, truncated = _truncate_join(sources_blocks, max_chars=100_000)
    prompt_parts = [researcher_instructions]
    prompt_parts.append("<user_query_summary>\n" + (summary or "") + "\n</user_query_summary>\n")
    populated = [q for q in queries if q and q.strip()]
    if populated:
        prompt_parts.append("<search_queries>\n" + "\n".join(f"- {q.strip()}" for q in populated) + "\n</search_queries>\n")
    if indexed_urls:
        prompt_parts.append("<sources_list>\n" + "\n".join(indexed_urls) + "\n</sources_list>\n")
    prompt_parts.append("<fetched_documents>\n" + sources_joined + ("\n\n[NOTE] Sources truncated due to context limits." if truncated else "") + "\n</fetched_documents>")
    return "\n\n".join(prompt_parts)


def _write_report_tmp(text: str) -> str:
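    """Write the report text to a temporary file and return its path."""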
    tmp_dir = tempfile.mkdtemp(prefix="deep_research_")
    path = os.path.join(tmp_dir, "research_report.txt")
    with open(path, "w", encoding="utf-8") as file:
        file.write(text)
    return path


def Deep_Research(
    summary: Annotated[str, "Summarization of research topic (one or more sentences)."],
    query1: Annotated[str, "DDG Search Query 1"],
    max1: Annotated[int, "Max results for Query 1 (1-50)"] = 10,
    query2: Annotated[str, "DDG Search Query 2"] = "",
    max2: Annotated[int, "Max results for Query 2 (1-50)"] = 10,
    query3: Annotated[str, "DDG Search Query 3"] = "",
    max3: Annotated[int, "Max results for Query 3 (1-50)"] = 10,
    query4: Annotated[str, "DDG Search Query 4"] = "",
    max4: Annotated[int, "Max results for Query 4 (1-50)"] = 10,
    query5: Annotated[str, "DDG Search Query 5"] = "",
    max5: Annotated[int, "Max results for Query 5 (1-50)"] = 10,
) -> tuple[str, str, str]:
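    """Run up to five DuckDuckGo searches, fetch result pages in parallel within a
    ~55 second budget, and generate a long-form research report with a hosted LLM.

    Returns a tuple of (report Markdown, numbered source list, path to a text file
    containing the report).
    """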
    _log_call_start(
        "Deep_Research",
        summary=_truncate_for_log(summary or "", 200),
        queries=[q for q in [query1, query2, query3, query4, query5] if q],
    )
    if not HF_TEXTGEN_TOKEN:
        _log_call_end("Deep_Research", "error=missing HF token")
        raise gr.Error("Please provide a `HF_READ_TOKEN` to enable Deep Research.")
    queries = [
        _normalize_query(query1 or ""),
        _normalize_query(query2 or ""),
        _normalize_query(query3 or ""),
        _normalize_query(query4 or ""),
        _normalize_query(query5 or ""),
    ]
    reqs = [
        max(1, min(50, int(max1))),
        max(1, min(50, int(max2))),
        max(1, min(50, int(max3))),
        max(1, min(50, int(max4))),
        max(1, min(50, int(max5))),
    ]
    total_requested = sum(reqs)
    if total_requested > 50:
        reqs = [10, 10, 10, 10, 10]
    start_ts = time.time()
    budget_seconds = 55.0
    deadline = start_ts + budget_seconds

    def time_left() -> float:
        return max(0.0, deadline - time.time())

    all_urls: list[str] = []
    tasks = []
    with ThreadPoolExecutor(max_workers=min(5, sum(1 for q in queries if q.strip())) or 1) as executor:
        for query, count in zip(queries, reqs):
            if not query.strip():
                continue
            tasks.append(executor.submit(_search_urls_only, query.strip(), count))
        for future in as_completed(tasks):
            try:
                urls = future.result() or []
            except Exception:
                urls = []
            for url in urls:
                if url not in all_urls:
                    all_urls.append(url)
            if len(all_urls) >= 50:
                break
            if time_left() <= 0.5:
                break
    if len(all_urls) > 50:
        all_urls = all_urls[:50]
    blacklist = {
        "homedepot.com",
        "tractorsupply.com",
        "mcmaster.com",
        "mrchain.com",
        "answers.com",
        "city-data.com",
        "dictionary.cambridge.org",
    }

    def _domain(url: str) -> str:
        try:
            return urlparse(url).netloc.lower()
        except Exception:
            return ""

    all_urls = [url for url in all_urls if _domain(url) not in blacklist]
    skip_exts = (
        ".pdf",
        ".ppt",
        ".pptx",
        ".doc",
        ".docx",
        ".xls",
        ".xlsx",
        ".zip",
        ".gz",
        ".tgz",
        ".bz2",
        ".7z",
        ".rar",
    )

    def _skip_url(url: str) -> bool:
        try:
            path = urlparse(url).path.lower()
        except Exception:
            return False
        return any(path.endswith(ext) for ext in skip_exts)

    all_urls = [url for url in all_urls if not _skip_url(url)]
    pages: dict[str, str] = {}
    if all_urls:
        queue = deque(all_urls)
        attempts: dict[str, int] = {url: 0 for url in all_urls}
        max_attempts = 2
        max_workers = min(12, max(4, len(all_urls)))
        in_flight: dict[Future, str] = {}
        delayed: list[tuple[float, str]] = []

        def schedule_next(executor: ThreadPoolExecutor) -> None:
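            """Keep the pool saturated: submit queued URLs that have not already
            succeeded and still have attempts remaining, shortening per-request
            timeouts as the overall deadline approaches."""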
            while queue and len(in_flight) < max_workers:
                url = queue.popleft()
                if url in pages:
                    continue
                if attempts[url] >= max_attempts:
                    continue
                attempts[url] += 1
                tl = time_left()
                per_timeout = 10.0 if tl > 15 else (5.0 if tl > 8 else 2.0)
                future = executor.submit(_fetch_page_markdown_fast, url, 3000, per_timeout)
                in_flight[future] = url

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            schedule_next(executor)
            while (in_flight or queue) and time_left() > 0.2:
                now = time.time()
                if delayed:
                    ready = []
                    not_ready = []
                    for ready_time, url in delayed:
                        (ready if ready_time <= now else not_ready).append((ready_time, url))
                    delayed = not_ready
                    for _, url in ready:
                        queue.append(url)
                    if ready:
                        schedule_next(executor)
                done = [future for future in list(in_flight.keys()) if future.done()]
                if not done:
                    if not queue and delayed:
                        sleep_for = max(0.02, min(0.25, max(0.0, min(t for t, _ in delayed) - time.time())))
                        time.sleep(sleep_for)
                    else:
                        time.sleep(0.05)
                else:
                    for future in done:
                        url = in_flight.pop(future)
                        try:
                            md = future.result()
                            if md and not md.startswith("Unsupported content type") and not md.startswith("An error occurred"):
                                pages[url] = md
                                try:
                                    print(f"[FETCH OK] {url} (chars={len(md)})", flush=True)
                                except Exception:
                                    pass
                        except SlowHost:
                            if time_left() > 5.0:
                                delayed.append((time.time() + 3.0, url))
                        except Exception:
                            pass
                    schedule_next(executor)
    prompt = _build_research_prompt(summary=summary or "", queries=[q for q in queries if q.strip()], url_list=list(pages.keys()), pages_map=pages)
    messages = [
        {"role": "system", "content": "You are Nymbot, an expert deep research assistant."},
        {"role": "user", "content": prompt},
    ]
    try:
        prompt_chars = len(prompt)
    except Exception:
        prompt_chars = -1
    print(f"[PIPELINE] Fetch complete: pages={len(pages)}, unique_urls={len(pages.keys())}, prompt_chars={prompt_chars}", flush=True)
    print("[PIPELINE] Starting inference (provider=cerebras, model=Qwen/Qwen3-235B-A22B-Thinking-2507)", flush=True)

    def _run_inference(provider: str, max_tokens: int, temp: float, top_p: float):
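        """Call the researcher model through the given Inference Provider."""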
        client = InferenceClient(provider=provider, api_key=HF_TEXTGEN_TOKEN)
        return client.chat.completions.create(
            model="Qwen/Qwen3-235B-A22B-Thinking-2507",
            messages=messages,
            max_tokens=max_tokens,
            temperature=temp,
            top_p=top_p,
        )

    try:
        print("[LLM] Attempt 1: provider=cerebras, max_tokens=32768", flush=True)
        completion = _run_inference("cerebras", max_tokens=32768, temp=0.3, top_p=0.95)
    except Exception as exc1:
        print(f"[LLM] Attempt 1 failed: {str(exc1)[:200]}", flush=True)
        try:
            prompt2 = _build_research_prompt(
                summary=summary or "",
                queries=[q for q in queries if q.strip()],
                url_list=list(pages.keys())[:30],
                pages_map={key: pages[key] for key in list(pages.keys())[:30]},
            )
            messages = [
                {"role": "system", "content": "You are Nymbot, an expert deep research assistant."},
                {"role": "user", "content": prompt2},
            ]
            print("[LLM] Attempt 2: provider=cerebras (trimmed), max_tokens=16384", flush=True)
            completion = _run_inference("cerebras", max_tokens=16384, temp=0.7, top_p=0.95)
        except Exception as exc2:
            print(f"[LLM] Attempt 2 failed: {str(exc2)[:200]}", flush=True)
            try:
                print("[LLM] Attempt 3: provider=auto, max_tokens=8192", flush=True)
                completion = _run_inference("auto", max_tokens=8192, temp=0.7, top_p=0.95)
            except Exception as exc3:
                _log_call_end("Deep_Research", f"error={_truncate_for_log(str(exc3), 260)}")
                raise gr.Error(f"Researcher model call failed: {exc3}")
    raw = completion.choices[0].message.content or ""
    try:
        no_think = re.sub(r"<think>[\s\S]*?</think>", "", raw, flags=re.IGNORECASE)
        no_think = re.sub(r"</?think>", "", no_think, flags=re.IGNORECASE)
    except Exception:
        no_think = raw
    try:
        paragraphs = [p for p in re.split(r"\n\s*\n", no_think) if p.strip()]
        keep: List[str] = []
        removed = 0
        planning_re = re.compile(r"\b(let me|now i(?:'ll| will)?|first,|i will now|i will|i'll|let's|now let me|i need to|now i'll|now i will)\b", re.IGNORECASE)
        for paragraph in paragraphs:
            if planning_re.search(paragraph):
                removed += 1
                continue
            keep.append(paragraph)
        report = "\n\n".join(keep).strip()
        if not report:
            report = no_think.strip()
    except Exception:
        report = no_think
        removed = 0
    report = re.sub(r"\n\s*\n\s*\n+", "\n\n", report)
    try:
        print(f"[POSTPROCESS] removed_planning_paragraphs={removed}, raw_chars={len(raw)}, final_chars={len(report)}", flush=True)
    except Exception:
        pass
    links_text = "\n".join([f"[{i+1}] {url}" for i, url in enumerate(pages.keys())])
    file_path = _write_report_tmp(report)
    elapsed = time.time() - start_ts
    print(f"[TIMING] Deep_Research elapsed: {elapsed:.2f}s", flush=True)
    _log_call_end("Deep_Research", f"urls={len(pages)} file={os.path.basename(file_path)} duration={elapsed:.2f}s")
    return report, links_text, file_path


def build_interface() -> gr.Interface:
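    """Create the Gradio Interface wrapping Deep_Research."""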
    return gr.Interface(
        fn=Deep_Research,
        inputs=[
            gr.Textbox(label="Summarization of research topic", lines=3, placeholder="Briefly summarize the research topic or user question"),
            gr.Textbox(label="DDG Search Query 1", max_lines=1),
            gr.Slider(1, 50, value=10, step=1, label="Max results (Q1)"),
            gr.Textbox(label="DDG Search Query 2", value="", max_lines=1),
            gr.Slider(1, 50, value=10, step=1, label="Max results (Q2)"),
            gr.Textbox(label="DDG Search Query 3", value="", max_lines=1),
            gr.Slider(1, 50, value=10, step=1, label="Max results (Q3)"),
            gr.Textbox(label="DDG Search Query 4", value="", max_lines=1),
            gr.Slider(1, 50, value=10, step=1, label="Max results (Q4)"),
            gr.Textbox(label="DDG Search Query 5", value="", max_lines=1),
            gr.Slider(1, 50, value=10, step=1, label="Max results (Q5)"),
        ],
        outputs=[
            gr.Markdown(label="Research Report"),
            gr.Textbox(label="Fetched Links", lines=8),
            gr.File(label="Download Research Report", file_count="single"),
        ],
        title="Deep Research",
        description=(
            "<div style=\"text-align:center\">Perform multi-query web research: search with DuckDuckGo, fetch up to 50 pages in parallel, "
            "and generate a comprehensive report using a large LLM via Hugging Face Inference Providers (Cerebras). Requires HF_READ_TOKEN.</div>"
        ),
        api_description=(
            "Runs 1–5 DDG searches (URLs only), caps total results to 50 (when exceeding, each query returns 10). "
            "Fetches all URLs (3000 chars each) and calls the Researcher to write a research report. "
            "Returns the report (Markdown), the list of sources, and a downloadable text file path. "
            "Provide the user with one-paragraph summary of the research report and the txt file in this format `![research_report.txt](URL)`"
        ),
        flagging_mode="never",
        show_api=bool(HF_TEXTGEN_TOKEN),
    )


__all__ = ["Deep_Research", "build_interface"]
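

# Usage sketch (assumption: in the deployed app this interface is mounted by app.py;
# the relative `Web_Fetch` import above means this module is not intended to run as a script).
# if __name__ == "__main__":
#     build_interface().launch()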