Update app.py
app.py CHANGED
@@ -5,33 +5,33 @@ import tempfile
 # Initialize the Hugging Face Inference Client
 client = InferenceClient()
 
-# Function to generate
+# Function to generate content dynamically
 def generate_content(selected_topic, subtopic, complexity, input_text, examples_count, output_type):
     """
-    Generate content dynamically based on user input
+    Generate content dynamically based on user input with support for LaTeX and file downloads.
 
     Args:
-        selected_topic (str):
-        subtopic (str):
-        complexity (str):
-        input_text (str): Additional
-        examples_count (int): Number of examples to generate.
-        output_type (str): Desired output format (Plain Text, LaTeX,
+        selected_topic (str): The selected topic (e.g., Math, STEM, Code Generation).
+        subtopic (str): A specific subtopic for content generation.
+        complexity (str): Expertise level (Beginner, Intermediate, Advanced).
+        input_text (str): Additional context or problem to solve.
+        examples_count (int): Number of examples or outputs to generate.
+        output_type (str): Desired output format (Plain Text, LaTeX, Downloadable).
 
     Returns:
-
+        tuple: Generated content and file path (if applicable).
     """
-    #
+    # Create the prompt dynamically
    prompt = (
         f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} examples, lessons, "
         f"or problems related to {subtopic}. Context: {input_text}" if input_text.strip()
         else f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} lessons "
         f"or problems focused on {subtopic}."
     )
-    messages = [{"role": "user", "content": prompt}]
 
     try:
-        # Generate
+        # Generate content using the model
+        messages = [{"role": "user", "content": prompt}]
         response = client.chat.completions.create(
             model="Qwen/Qwen2.5-Coder-32B-Instruct",
             messages=messages,
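
Note on this hunk: the prompt is built with a single Python conditional expression over implicitly concatenated f-strings, so the "Context:" suffix appears only when input_text is non-blank. A minimal standalone sketch of how it evaluates (the sample argument values are hypothetical, not from the commit):

    # Hypothetical sample values, for illustration only
    examples_count, complexity, selected_topic = 3, "Beginner", "Math"
    subtopic, input_text = "fractions", ""

    prompt = (
        f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} examples, lessons, "
        f"or problems related to {subtopic}. Context: {input_text}" if input_text.strip()
        else f"Generate {examples_count} {complexity.lower()}-level {selected_topic.lower()} lessons "
        f"or problems focused on {subtopic}."
    )
    print(prompt)
    # -> Generate 3 beginner-level math lessons or problems focused on fractions.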
@@ -39,27 +39,32 @@ def generate_content(selected_topic, subtopic, complexity, input_text, examples_
             max_tokens=1024,
             top_p=0.7
         )
-        content
+        # Extract content from the response
+        content = response.choices[0].message.content if response.choices else "No content generated."
 
-        #
+        # Handle output formatting
         if output_type == "LaTeX":
-            #
-            latex_content = f"$$\n{content}\n$$"
-            return
+            # Ensure LaTeX content is properly wrapped
+            latex_content = f"$$\n{content.strip()}\n$$"
+            return latex_content, None
         elif output_type == "Downloadable":
+            # Save content to a temporary file
             temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
             with open(temp_file.name, "w") as file:
                 file.write(content)
-            return
+            return "File generated successfully. Use the download button.", temp_file.name
         else:
-
+            # Default to plain text
+            return content, None
     except Exception as e:
-        return
+        # Catch and return any errors
+        return f"Error during content generation: {e}", None
+
 
 # Create the Gradio interface
 with gr.Blocks() as app:
-    # App Title and
-    gr.Markdown("## π Advanced STEM and Code Generator with LaTeX
+    # App Title and Description
+    gr.Markdown("## π Advanced STEM and Code Generator with LaTeX and File Downloads")
 
     with gr.Row():
         # Input Section
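
Two details in this hunk are worth noting. The guard in the new line 43 avoids an IndexError when the model returns no choices. And the Downloadable branch uses tempfile.NamedTemporaryFile(delete=False, ...) so the file outlives its handle and the path can be handed to the gr.File component. A minimal sketch of that pattern in isolation (the sample content string is hypothetical); it adds an explicit close() that the committed code omits, which matters on Windows, where a still-open NamedTemporaryFile cannot be reopened by name:

    import tempfile

    content = "Example lesson text."  # hypothetical sample content
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt")
    temp_file.close()  # release the handle before reopening by name (required on Windows)
    with open(temp_file.name, "w") as file:
        file.write(content)
    print(temp_file.name)  # path a gr.File component can serve for download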
@@ -104,14 +109,10 @@ with gr.Blocks() as app:
         download_button = gr.File(label="Download File (if applicable)")
 
     # Connect the generate function to the button
-    def update_output(result):
-        if
-
-
-        elif "file" in result:
-            return "File ready for download.", result["file"]
-        else:
-            return result, None
+    def update_output(result, file_path):
+        if file_path:
+            return result, file_path
+        return result, None
 
     generate_button.click(
         fn=generate_content,
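
The hunk is cut off inside generate_button.click(...), and the new update_output helper is defined just above but not referenced in the visible portion. Since generate_content now returns a (content, file_path) tuple, the call presumably maps that tuple onto the text output and the gr.File component; a hypothetical completion (the inputs/outputs component names are assumptions, not shown in this diff):

    # Hypothetical wiring, inferred from the (content, file_path) return shape;
    # every component name except download_button is an assumption.
    generate_button.click(
        fn=generate_content,
        inputs=[selected_topic, subtopic, complexity, input_text,
                examples_count, output_type],
        outputs=[output_text, download_button],
    )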