# streamlit_app.py
import streamlit as st
import re
from sympy import symbols, integrate, exp, pi
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

st.set_page_config(page_title="AI Problem Solver By Mathematically Modelling", page_icon="🧠")

# Symbolic variables: x for spatial integrands, t for time-dependent decay problems.
x, t = symbols("x t")
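
# extract_integral below is a regex-based parser that recognizes two shapes of
# problem text (examples in the patterns are illustrative, not from the app):
#   1. polynomial integrands such as "3x^2", with limits written "x = 0" / "x = 2"
#   2. radioactive-decay integrands such as "100e^-0.5t", triggered by the
#      keywords "radioactive" or "half-life", with limits on t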
def extract_integral(problem_text):
    """Regex-parse a word problem and evaluate the corresponding definite integral."""
    match = re.search(r'(\d+)\*?[tx]\^(\d+)', problem_text)
    limits = re.findall(r'[tx]\s*=\s*([\d\.\w]+)', problem_text)
    if 'radioactive' in problem_text or 'half-life' in problem_text:
        decay_match = re.search(r'(\d+)\s*e\^\s*-\s*(\d+\.?\d*)[tx]', problem_text)
        if decay_match and len(limits) == 2:
            N0 = int(decay_match.group(1))
            lam = float(decay_match.group(2))
            # eval only sees the parsed limit strings plus pi; still, run this app
            # on trusted input only, since eval can execute arbitrary code.
            lower, upper = map(lambda v: eval(v, {"pi": pi}), limits)
            expr = lam * N0 * exp(-lam * t)
            return f"Total decayed = {integrate(expr, (t, lower, upper)).evalf()} units."
    if match and len(limits) == 2:
        coefficient = int(match.group(1))
        exponent = int(match.group(2))
        lower_limit = eval(limits[0], {"pi": pi})
        upper_limit = eval(limits[1], {"pi": pi})
        expr = coefficient * x**exponent
        return f"Accumulated Quantity = {integrate(expr, (x, lower_limit, upper_limit))}"
    return "Could not parse the integral format."
@st.cache_resource
def load_model():
    # Cached so Streamlit loads the weights once, not on every button press.
    # Fall back to an alternative instruction-tuned model when no GPU is available.
    use_light_model = not torch.cuda.is_available()
    model_name = (
        "deepseek-ai/deepseek-math-7b-base" if not use_light_model
        else "tiiuae/falcon-7b-instruct"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        device_map="auto" if torch.cuda.is_available() else None,
    )
    return tokenizer, model
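
# Sketch of the selection behavior. Note that falcon-7b-instruct is not actually
# smaller than deepseek-math-7b (both are ~7B parameters), so the CPU path trades
# math specialization rather than memory; swap in a genuinely small checkpoint
# here if CPU RAM is the constraint.
#   GPU available -> deepseek-ai/deepseek-math-7b-base, float16, device_map="auto"
#   CPU only      -> tiiuae/falcon-7b-instruct, float32, default device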
def run_llm_reasoning(user_question):
    tokenizer, model = load_model()
    prompt = f"""
Q: Solve the following physics problem using rigorous mathematical reasoning. Do not skip any steps.
Problem: {user_question}
### Final Answer format:
Final Answer: [VARIABLE] = [ANSWER] [UNIT]
A:"""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=500,
            do_sample=True,  # required for temperature to take effect
            temperature=0.2,
            repetition_penalty=1.0,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Keep only the text the model generated after the final "A:" marker.
    return tokenizer.decode(outputs[0], skip_special_tokens=True).split("A:")[-1].strip()
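
# Example of the post-processing (hypothetical model output, for illustration only):
#   decoded text: "\nQ: Solve ... A: Work = integral of 3x^2 dx ... Final Answer: W = 8 J"
#   split("A:")[-1].strip() keeps everything after the last "A:", i.e. the solution.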

# ---------------- UI ----------------
st.title("🧠 AI Physics & Math Solver")

task_type = st.selectbox("Choose Task Type", ["LLM Reasoning (DeepSeek/Fallback)", "Symbolic Integration"])
user_question = st.text_area("Enter your physics or math question below:")

if st.button("Solve"):
    if not user_question.strip():
        st.warning("Please enter a question first.")
    else:
        with st.spinner("Solving..."):
            if task_type == "LLM Reasoning (DeepSeek/Fallback)":
                result = run_llm_reasoning(user_question)
            else:
                result = extract_integral(user_question)
        st.subheader("🧠 Answer")
        st.write(result)
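
# To run the app locally (standard Streamlit CLI):
#   streamlit run streamlit_app.py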