import os
import subprocess
import sys
import streamlit as st

# st.set_page_config must be the first Streamlit command executed in the script
st.set_page_config(
    page_title="LLM Security Labs",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Ensure the app root is on sys.path so utils.* and the other local packages are importable
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Environment setup
os.environ["MODEL_PATH"] = "/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit"
os.environ["XDG_CONFIG_HOME"] = "/tmp/.streamlit"
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"  # browser.gatherUsageStats
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
# Create required directories
os.makedirs("/tmp/.streamlit", exist_ok=True)
os.makedirs("/tmp/hf_cache", exist_ok=True)
os.makedirs("/tmp/models", exist_ok=True)
# Runtime model download if needed
MODEL_PATH = "/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"

# Temporary troubleshooting block (left disabled): always run download_model.py and
# surface its stdout/stderr in the UI.
# st.warning("Running model download step...")
# try:
#     result = subprocess.run(
#         ["python3", "model/download_model.py"],
#         check=True,
#         capture_output=True,
#         text=True  # ensures stdout/stderr are strings
#     )
#     st.success("Model download attempted.")
#     st.text("STDOUT:")
#     st.text(result.stdout)
#     st.text("STDERR:")
#     st.text(result.stderr)
# except subprocess.CalledProcessError as e:
#     st.error("Model download failed. Check HF_TOKEN or permissions.")
#     st.text(f"Exit code: {e.returncode}")
#     st.text(f"Command: {e.cmd}")
#     st.text("STDOUT:")
#     st.text(e.stdout or "No stdout")
#     st.text("STDERR:")
#     st.text(e.stderr or "No stderr")
#     st.stop()
# end of temp troubleshooting code
if not os.path.exists(MODEL_PATH):
    st.warning("Model not found. Downloading...")
    try:
        subprocess.run(
            ["python3", "model/download_model.py"],
            check=True,
            capture_output=True,
            text=True
        )
        st.success("Model downloaded successfully.")
    except subprocess.CalledProcessError as e:
        st.error("Model download failed. Check HF_TOKEN or permissions.")
        st.text(f"Exit code: {e.returncode}")
        st.text(f"Command: {e.cmd}")
        st.text(f"STDOUT: {e.stdout or 'N/A'}")
        st.text(f"STDERR: {e.stderr or 'N/A'}")
        st.stop()
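
# NOTE: model/download_model.py is not shown in this file. A minimal sketch of what it is
# assumed to do (the repo_id below is a guess based on the GGUF filename, not confirmed):
#
#     import os
#     from huggingface_hub import hf_hub_download
#
#     hf_hub_download(
#         repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",  # assumed source repo
#         filename="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
#         local_dir="/tmp/models",
#         token=os.environ.get("HF_TOKEN"),  # only needed for gated/private repos
#     )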
# Debug helper: list downloaded model files
# st.markdown("## /tmp/models content:")
# st.text('\n'.join(os.listdir("/tmp/models")))
# Add local subdirectories to the Python path
sys.path.append(os.path.join(os.path.dirname(__file__), "modules"))
sys.path.append(os.path.join(os.path.dirname(__file__), "model"))
sys.path.append(os.path.join(os.path.dirname(__file__), "utils"))
# Lab imports
from modules import (
    prompt_injection_2025v1,
    insecure_output_handling_2025v1,
    training_data_poisoning_2025v1,
    sensitive_information_disclosure_2025v1
)

print("✅ prompt_injection_2025v1 loaded", file=sys.stderr)
print("✅ insecure_output_handling_2025v1 loaded", file=sys.stderr)
print("✅ training_data_poisoning_2025v1 loaded", file=sys.stderr)
print("✅ sensitive_information_disclosure_2025v1 loaded", file=sys.stderr)
# Streamlit page config is set at the top of the script, before any other Streamlit command.
# Map the "lab" URL query parameter to lab modules
lab_key = st.query_params.get("lab")

lab_map = {
    "prompt-injection": prompt_injection_2025v1,
    "insecure-output-handling": insecure_output_handling_2025v1,
    "training-data-poisoning": training_data_poisoning_2025v1,
    "sensitive-information-disclosure": sensitive_information_disclosure_2025v1
}
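
# Example (hypothetical Space URL): <space-url>/?lab=prompt-injection routes to prompt_injection_2025v1.run()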
# Routing
if lab_key in lab_map:
    st.title(f"🧪 LLM Security Lab – {lab_key.replace('-', ' ').title()} (2025v1)")
    lab_map[lab_key].run()
else:
    st.title("🧪 LLM Security Labs – OWASP-Inspired Threat Scenarios")
    st.markdown("""
This is the landing page for the LLM security labs. Each lab demonstrates a known class of risk aligned with the evolving OWASP LLM Top 10.

Access a lab directly via one of the following URLs:

- [Prompt Injection](?lab=prompt-injection)
- [Insecure Output Handling (coming soon)](#)
- [Training Data Poisoning (coming soon)](#)
- [Sensitive Information Disclosure (coming soon)](#)

Each lab includes:

- **Realistic model interaction**
- **Risk scoring and feedback**
- **Detailed logging**
- **Optional RAG integration** where applicable
""")