# finewiki-viewer / app.py
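# Gradio viewer for the HuggingFaceFW/finewiki dataset: streams each language's
# Parquet shard and shows the FineWiki extraction next to the original page and
# a wikimedia/wikipedia-style re-extraction of the raw wikitext.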
import gradio as gr
import re
from datatrove.pipeline.readers import ParquetReader
from default_wiki_pipeline import _parse_and_clean_wikicode, mwparserfromhell
lang_list = ['ab', 'ace', 'ady', 'af', 'als', 'alt', 'ami', 'am', 'ang', 'anp', 'an', 'arc', 'ar', 'ary', 'arz', 'ast', 'as', 'atj', 'avk', 'av', 'awa', 'ay', 'azb', 'az', 'ban', 'bar', 'bat_smg', 'ba', 'bbc', 'bcl', 'be', 'bg', 'bh', 'bi', 'bjn', 'blk', 'bm', 'bn', 'bo', 'bpy', 'br', 'bs', 'bug', 'bxr', 'ca', 'cbk_zam', 'cdo', 'ceb', 'ce', 'chr', 'ch', 'chy', 'ckb', 'co', 'crh', 'cr', 'csb', 'cs', 'cu', 'cv', 'cy', 'dag', 'da', 'de', 'dga', 'din', 'diq', 'dsb', 'dty', 'dv', 'dz', 'ee', 'el', 'eml', 'en', 'eo', 'es', 'et', 'eu', 'ext', 'fat', 'fa', 'ff', 'fiu_vro', 'fi', 'fj', 'fon', 'fo', 'frp', 'frr', 'fr', 'fur', 'fy', 'gag', 'gan', 'ga', 'gcr', 'gd', 'glk', 'gl', 'gn', 'gom', 'gor', 'got', 'gpe', 'guc', 'gur', 'gu', 'guw', 'gv', 'hak', 'ha', 'haw', 'he', 'hif', 'hi', 'hr', 'hsb', 'ht', 'hu', 'hy', 'hyw', 'ia', 'id', 'ie', 'ig', 'ik', 'ilo', 'inh', 'io', 'is', 'it', 'iu', 'jam', 'ja', 'jbo', 'jv', 'kaa', 'kab', 'ka', 'kbd', 'kbp', 'kcg', 'kg', 'ki', 'kk', 'kl', 'km', 'kn', 'koi', 'ko', 'krc', 'ksh', 'ks', 'ku', 'kv', 'kw', 'ky', 'lad', 'la', 'lbe', 'lb', 'lez', 'lfn', 'lg', 'lij', 'li', 'lld', 'lmo', 'ln', 'lo', 'ltg', 'lt', 'lv', 'mad', 'mai', 'map_bms', 'mdf', 'mg', 'mhr', 'min', 'mi', 'mk', 'ml', 'mni', 'mn', 'mnw', 'mrj', 'mr', 'ms', 'mt', 'mwl', 'myv', 'my', 'mzn', 'nah', 'nap', 'nds_nl', 'nds', 'ne', 'new', 'nia', 'nl', 'nn', 'nov', 'no', 'nqo', 'nrm', 'nso', 'nv', 'ny', 'oc', 'olo', 'om', 'or', 'os', 'pag', 'pam', 'pap', 'pa', 'pcd', 'pcm', 'pdc', 'pfl', 'pih', 'pi', 'pl', 'pms', 'pnb', 'pnt', 'ps', 'pt', 'pwn', 'qu', 'rm', 'rmy', 'rn', 'roa_rup', 'roa_tara', 'ro', 'rue', 'ru', 'rw', 'sah', 'sat', 'sa', 'scn', 'sco', 'sc', 'sd', 'se', 'sg', 'shi', 'shn', 'sh', 'simple', 'si', 'skr', 'sk', 'sl', 'smn', 'sm', 'sn', 'so', 'sq', 'srn', 'sr', 'ss', 'stq', 'st', 'su', 'sv', 'sw', 'szl', 'szy', 'ta', 'tay', 'tcy', 'tet', 'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tly', 'tn', 'to', 'tpi', 'trv', 'tr', 'ts', 'tt', 'tum', 'tw', 'tyv', 'ty', 'udm', 'ug', 'uk', 'ur', 'uz', 'vec', 'vep', 've', 'vi', 'vls', 'vo', 'war', 'wa', 'wo', 'wuu', 'xal', 'xh', 'xmf', 'yi', 'yo', 'za', 'zea', 'zgh', 'zh_classical', 'zh_min_nan', 'zh_yue', 'zh', 'zu']
def _build_header_markdown(doc) -> str:
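    """Build the 'Title | Page ID | Wikidata ID' header line, with the source URL below."""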
meta = doc.metadata or {}
title = meta.get("title") or ""
page_id = meta.get("page_id") or meta.get("id") or ""
wikidata_id = meta.get("wikidata_id") or ""
url = meta.get("url") or ""
parts = []
if title:
parts.append(f"**Title**: {title}")
if page_id:
parts.append(f"**Page ID**: {page_id}")
if wikidata_id:
parts.append(f"**Wikidata ID**: {wikidata_id}")
header = " | ".join(parts)
if url:
header += f"\n\n[{url}]({url})"
return header
def has_markdown_table(md_text: str) -> bool:
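    """Heuristic: a pipe-delimited row followed by a '---' separator row."""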
return bool(re.search(r"(?m)^\s*\|.+\|\s*\n\s*\|?\s*:?-{3,}.*$", md_text or ""))
def has_code_fence(md_text: str) -> bool:
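    """Treat any ``` fence marker in the text as a code block."""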
return "```" in (md_text or "")
def matches_prefilters(doc, require_has_math: bool | None, require_has_infobox: bool | None) -> bool:
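    """Metadata-only filters, cheap to evaluate before rendering anything.

    Note: "infoboxes" is compared as a string here, so "[]" counts as empty.
    """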
meta = doc.metadata or {}
if require_has_math and not bool(meta.get("has_math")):
return False
if require_has_infobox and meta.get("infoboxes", "[]") == "[]":
return False
return True
def postfilters_ok(md_text: str, require_has_table: bool | None, require_has_code: bool | None) -> bool:
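    """Filters that need the rendered text: markdown table and code fence checks."""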
if require_has_table and not has_markdown_table(md_text):
return False
if require_has_code and not has_code_fence(md_text):
return False
return True
def format_for_markdown(text: str) -> str:
    # Double '\n' so paragraphs render in Markdown, but leave newlines that
    # follow a '|' (table rows) alone so tables stay contiguous
    return re.sub(r'(?<!\|)\n', '\n\n', text)
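# Example (hypothetical inputs): format_for_markdown("a\nb") returns "a\n\nb",
# while "| x |\n| y |" is unchanged because each newline follows a '|'.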
def find_next_valid(docs_cache, reader_iter, start_idx: int, require_has_math: bool | None, require_has_infobox: bool | None, require_has_table: bool | None, require_has_code: bool | None):
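    """Find the next doc after start_idx that passes all pre- and post-filters.

    Scans the already-cached docs first, then streams one doc at a time until
    a match is found or the reader is exhausted; returns -1 when nothing matches.
    """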
# Scan cache first (forward from start_idx)
i = max(-1, start_idx)
while i + 1 < len(docs_cache):
i += 1
if not matches_prefilters(docs_cache[i], require_has_math, require_has_infobox):
continue
left, left_meta, md, info, right, header = render_idx(docs_cache, i)
if postfilters_ok(left, require_has_table, require_has_code):
return i, docs_cache, reader_iter, left, md, left_meta, header, md, info, right
# Stream until found or exhausted
while True:
prev_len = len(docs_cache)
docs_cache, reader_iter = _ensure_until_index(docs_cache, reader_iter, prev_len)
if len(docs_cache) == prev_len:
break
if not matches_prefilters(docs_cache[-1], require_has_math, require_has_infobox):
continue
left, left_meta, md, info, right, header = render_idx(docs_cache, len(docs_cache) - 1)
if postfilters_ok(left, require_has_table, require_has_code):
idx = len(docs_cache) - 1
return idx, docs_cache, reader_iter, left, md, left_meta, header, md, info, right
return -1, docs_cache, reader_iter, "No documents match filters.", "", {}, "", "", [], render_iframe("")
def render_iframe(url: str, height: int = 800) -> str:
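    """Wrap a URL in a full-width iframe; empty URLs fall back to about:blank."""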
safe_url = url or "about:blank"
return (
f'<iframe src="{safe_url}" '
f'style="width:100%; height:{height}px; border:0;" loading="lazy"></iframe>'
)
def _safe_url_from_metadata(meta: dict) -> str:
meta = meta or {}
return meta.get("url") or ""
def _extract_language(meta: dict) -> str:
# Try common metadata fields for language code
meta = meta or {}
lang = meta.get("lang") or meta.get("language")
if lang:
return str(lang)
wiki = meta.get("wiki") or meta.get("wikiname") or ""
base = str(wiki).removesuffix("_namespace_0") if wiki else ""
if base.endswith("wiki"):
return base[:-4]
return base or "en"
def _ensure_until_index(docs_cache, reader_iter, target_idx: int):
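    """Pull docs from the streaming reader until the cache covers target_idx (or the reader runs out)."""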
if reader_iter is None:
return docs_cache, reader_iter
while len(docs_cache) <= target_idx:
try:
nxt = next(reader_iter)
except StopIteration:
break
docs_cache.append(nxt)
return docs_cache, reader_iter
def on_select_language(lang: str, require_has_math: bool, require_has_infobox: bool, require_has_table: bool, require_has_code: bool):
"""Load documents for the selected language from HF Parquet and display."""
language = (lang or "").strip()
if not language:
return (-1, [], None, "Select a language.", {}, "", [], render_iframe(""))
try:
path = f"hf://datasets/HuggingFaceFW/finewiki/data/{language}wiki"
reader_iter = ParquetReader(path)()
except Exception as e:
return (-1, [], None, f"Failed to read: {e}", {}, "", [], render_iframe(""))
docs_cache = []
docs_cache, reader_iter = _ensure_until_index(docs_cache, reader_iter, 0)
if not docs_cache:
        return (-1, [], reader_iter, "No documents found.", "", {}, "", "", [], render_iframe(""))
# Find first doc matching pre- and post-filters
idx, docs_cache, reader_iter, left, md, left_meta, header, right_md, info, right = find_next_valid(
docs_cache, reader_iter, -1, require_has_math, require_has_infobox, require_has_table, require_has_code
)
if idx == -1:
return (-1, docs_cache, reader_iter, "No documents match filters.", "", {}, "", "", [], render_iframe(""))
return (idx, docs_cache, reader_iter, left, format_for_markdown(left), left_meta, header, right_md, info, right)
def show_doc(doc):
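    """Split one document into the UI values: raw FineWiki text, metadata,
    cleaned wikitext, infoboxes, preview iframe and header line."""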
left = getattr(doc, "text", "")
meta = getattr(doc, "metadata", None) or {}
    # Re-extract comparison text from the raw wikitext with the
    # default_wiki_pipeline helper; pages without wikitext render as empty
    wikitext = meta.get("wikitext") or ""
    md_clean = _parse_and_clean_wikicode(wikitext, parser=mwparserfromhell, language=_extract_language(meta)) if wikitext else ""
info = meta.get("infoboxes", [])
right = render_iframe(_safe_url_from_metadata(meta))
header = _build_header_markdown(doc)
return left, meta, md_clean, info, right, header
def render_idx(docs, idx: int):
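    """Render the doc at idx, clamped to the cache bounds."""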
if not docs:
return "No documents.", {}, "", [], render_iframe(""), ""
idx = max(0, min(idx, len(docs) - 1))
doc = docs[idx]
left, left_meta, md, info, right, header = show_doc(doc)
return left, left_meta, md, info, right, header
def on_prev(docs_cache, idx: int, reader_iter, require_has_math: bool, require_has_infobox: bool, require_has_table: bool, require_has_code: bool):
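    """Step back to the closest earlier doc that passes all filters (cache only; nothing is streamed when going backwards)."""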
if not docs_cache:
# Try to ensure at least first doc is loaded
docs_cache, reader_iter = _ensure_until_index(docs_cache, reader_iter, 0)
if not docs_cache:
return idx, docs_cache, reader_iter, "No documents.", {}, "", [], render_iframe("")
    new_idx = max(0, idx - 1)
    # Scan backwards from new_idx to the start; prefilters run on metadata,
    # postfilters on the rendered FineWiki text (mirroring find_next_valid)
    for i in range(new_idx, -1, -1):
        if not matches_prefilters(docs_cache[i], require_has_math, require_has_infobox):
            continue
        left, left_meta, md, info, right, header = render_idx(docs_cache, i)
        if postfilters_ok(left, require_has_table, require_has_code):
            return i, docs_cache, reader_iter, left, format_for_markdown(left), left_meta, header, md, info, right
    return idx, docs_cache, reader_iter, "No documents match filters.", "", {}, "", "", [], render_iframe("")
def on_next(docs_cache, idx: int, reader_iter, require_has_math: bool, require_has_infobox: bool, require_has_table: bool, require_has_code: bool):
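    """Advance to the next doc that passes all filters, streaming more docs as needed."""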
target_idx = idx + 1 if idx >= 0 else 0
docs_cache, reader_iter = _ensure_until_index(docs_cache, reader_iter, target_idx)
if not docs_cache:
return idx, docs_cache, reader_iter, "No documents.", {}, "", [], render_iframe("")
# Apply filters forward using new finder
new_idx, docs_cache, reader_iter, left, md, left_meta, header, right_md, info, right = find_next_valid(
docs_cache, reader_iter, idx, require_has_math, require_has_infobox, require_has_table, require_has_code
)
if new_idx == -1:
return idx, docs_cache, reader_iter, "No documents match filters.", "", {}, "", "", [], render_iframe("")
return new_idx, docs_cache, reader_iter, left, format_for_markdown(left), left_meta, header, right_md, info, right
with gr.Blocks() as demo:
idx_state = gr.State(value=-1, time_to_live=900)
docs_state = gr.State(value=[], time_to_live=900)
iter_state = gr.State(value=None, time_to_live=900)
# Full-width controls row for navigation
with gr.Row():
with gr.Column():
with gr.Row():
header_md = gr.Markdown()
with gr.Row():
prev_btn = gr.Button("Previous")
next_btn = gr.Button("Next")
with gr.Column():
with gr.Row():
with gr.Column(scale=1):
language_select = gr.Dropdown(choices=lang_list, value="en", label="Language")
with gr.Column(scale=2):
with gr.Row():
require_has_math = gr.Checkbox(label="Has math", value=False)
require_has_infobox = gr.Checkbox(label="Has infobox", value=False)
require_has_table = gr.Checkbox(label="Has table", value=False)
require_has_code = gr.Checkbox(label="Has pre/code", value=False)
with gr.Row():
with gr.Column():
with gr.Tab("FineWiki markdown"):
left_text_md = gr.Markdown(label="FineWiki markdown")
with gr.Tab("FineWiki raw"):
left_text_raw = gr.Textbox(label="FineWiki extraction", lines=30, elem_id="left_text_box")
with gr.Tab("FineWiki metadata"):
left_meta = gr.JSON(label="Metadata")
with gr.Tab("FineWiki infoboxes"):
right_infoboxes = gr.JSON(label="Infoboxes")
with gr.Row():
prev_btn2 = gr.Button("Previous")
next_btn2 = gr.Button("Next")
with gr.Column():
with gr.Tab("Preview"):
right_iframe = gr.HTML(label="Original Page")
with gr.Tab("wikimedia/wikipedia"):
right_markdown = gr.Textbox(label="wikimedia/wikipedia extraction", lines=30, elem_id="right_markdown_box")
    # Shared wiring: every event reads the filter checkboxes and writes the same outputs
    filter_inputs = [require_has_math, require_has_infobox, require_has_table, require_has_code]
    nav_outputs = [idx_state, docs_state, iter_state, left_text_raw, left_text_md, left_meta, header_md, right_markdown, right_infoboxes, right_iframe]
    language_select.change(on_select_language, inputs=[language_select] + filter_inputs, outputs=nav_outputs, concurrency_limit=1)
    demo.load(on_select_language, inputs=[language_select] + filter_inputs, outputs=nav_outputs, concurrency_limit=1)
    # Both rows of navigation buttons share the same handlers
    nav_inputs = [docs_state, idx_state, iter_state] + filter_inputs
    prev_btn.click(on_prev, inputs=nav_inputs, outputs=nav_outputs, concurrency_limit=1)
    next_btn.click(on_next, inputs=nav_inputs, outputs=nav_outputs, concurrency_limit=1)
    prev_btn2.click(on_prev, inputs=nav_inputs, outputs=nav_outputs, concurrency_limit=1)
    next_btn2.click(on_next, inputs=nav_inputs, outputs=nav_outputs, concurrency_limit=1)
# Enable global queue to coordinate concurrent requests safely
demo.queue(default_concurrency_limit=1, max_size=128)
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", state_session_capacity=5000)