RLikhitha committed
Commit e185086 · verified · 1 Parent(s): 23c3f1e

Create tutor_ai.py

Files changed (1)
  1. core/tutor_ai.py +180 -0
core/tutor_ai.py ADDED
@@ -0,0 +1,180 @@
+ """
+ Main AI Tutor and Educational Features
+ """
+
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import random
+ from datetime import datetime
+ from .knowledge_math import KnowledgeBase, MathSolver
+
+ class EduTutorAI:
+     def __init__(self):
+         self.model_name = "ibm-granite/granite-3.3-2b-instruct"
+         self.tokenizer = None
+         self.model = None
+         self.text_generator = None
+         self.knowledge_base = KnowledgeBase()
+         self.math_solver = MathSolver()
+
+     def load_model(self):
+         """Load IBM Granite model with fallback"""
+         try:
+             print("🤖 Loading EduTutor AI model...")
+             self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
+
+             if self.tokenizer.pad_token is None:
+                 self.tokenizer.pad_token = self.tokenizer.eos_token
+
+             self.model = AutoModelForCausalLM.from_pretrained(
+                 self.model_name,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+                 low_cpu_mem_usage=True,
+                 device_map="auto" if torch.cuda.is_available() else None
+             )
+
+             self.text_generator = pipeline(
+                 "text-generation",
+                 model=self.model,
+                 tokenizer=self.tokenizer,
+                 torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
+                 device_map="auto" if torch.cuda.is_available() else None
+             )
+
+             print("✅ IBM Granite model loaded successfully!")
+             return True
+
+         except Exception as e:
+             print(f"❌ Error loading model: {str(e)}")
+             print("🔄 Trying GPT-2 fallback...")
+
+             try:
+                 self.model_name = "gpt2"
+                 self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
+                 self.model = AutoModelForCausalLM.from_pretrained("gpt2")
+
+                 if self.tokenizer.pad_token is None:
+                     self.tokenizer.pad_token = self.tokenizer.eos_token
+
+                 self.text_generator = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)
+                 print("✅ GPT-2 fallback loaded!")
+                 return True
+             except Exception as e2:
+                 print(f"❌ Fallback failed: {str(e2)}")
+                 return False
+
+     def is_greeting(self, text: str) -> bool:
+         """Check if input is a greeting"""
+         greetings = ['hello', 'hi', 'hey', 'good morning', 'good afternoon']
+         return any(greeting in text.lower() for greeting in greetings)
+
+     def is_math_problem(self, text: str) -> bool:
+         """Check if input contains a math problem"""
+         if self.math_solver.is_algebraic_equation(text):
+             return True
+         math_indicators = ['+', '-', '*', '/', '(', ')', 'calculate', 'compute', 'solve']
+         return any(indicator in text.lower() for indicator in math_indicators)
+
+     def generate_response(self, user_input: str, subject: str = "General", difficulty: str = "Intermediate") -> str:
+         """Main response generation method"""
+         try:
+             if self.is_greeting(user_input):
+                 return self.generate_greeting_response()
+
+             if self.is_math_problem(user_input):
+                 return self.solve_math_problem(user_input)
+
+             if self.text_generator is not None:
+                 return self.generate_dynamic_response(user_input, subject, difficulty)
+             else:
+                 return self.generate_fallback_response(user_input, subject, difficulty)
+
+         except Exception as e:
+             return self.generate_fallback_response(user_input, subject, difficulty)
+
+     def generate_greeting_response(self) -> str:
+         """Generate friendly greeting"""
+         responses = [
+             "Hello! I'm EduTutor AI, your personal learning assistant. What would you like to study today?",
+             "Hi there! Welcome to EduTutor AI! I'm here to help you learn and grow. What can I help you with?",
+             "Greetings! I'm ready to make learning fun and engaging. What topic interests you today?"
+         ]
+         return random.choice(responses)
+
+     def solve_math_problem(self, problem: str) -> str:
+         """Solve math problems"""
+         try:
+             if self.math_solver.is_algebraic_equation(problem):
+                 return self.math_solver.solve_algebraic_equation(problem)
+             else:
+                 return self.math_solver.solve_arithmetic_expression(problem)
+         except Exception as e:
+             return f"**Math Problem Analysis**\n\n**Problem:** {problem}\n\n**Approach:** Identify problem type, apply appropriate methods, show steps, verify answer."
+
+     def generate_dynamic_response(self, user_input: str, subject: str, difficulty: str) -> str:
+         """Generate AI response"""
+         try:
+             prompt = f"""You are EduTutor AI, an expert educational assistant specializing in {subject}.
+
+ Student Question: {user_input}
+ Subject: {subject}
+ Difficulty Level: {difficulty}
+
+ Provide a clear, educational response with explanations, key concepts, and study tips.
+
+ Educational Response:"""
+
+             response = self.text_generator(
+                 prompt,
+                 max_new_tokens=300,
+                 temperature=0.7,
+                 do_sample=True,
+                 pad_token_id=self.tokenizer.eos_token_id
+             )
+
+             generated_text = response[0]['generated_text']
+             if "Educational Response:" in generated_text:
+                 ai_response = generated_text.split("Educational Response:")[-1].strip()
+             else:
+                 ai_response = generated_text.replace(prompt, "").strip()
+
+             formatted_response = f"**🎓 EduTutor AI Response**\n\n"
+             formatted_response += f"**Question:** {user_input}\n"
+             formatted_response += f"**Subject:** {subject} | **Level:** {difficulty}\n\n"
+             formatted_response += f"**Answer:**\n{ai_response}\n\n"
+             formatted_response += f"**💡 Study Tip:** Practice similar problems and ask follow-up questions!"
+
+             return formatted_response
+
+         except Exception as e:
+             return self.generate_fallback_response(user_input, subject, difficulty)
+
+     def generate_fallback_response(self, user_input: str, subject: str, difficulty: str) -> str:
+         """Generate fallback response"""
+         topic_info = self.knowledge_base.get_accurate_info(user_input, subject)
+
+         response = f"""**🎓 Educational Response: {user_input}**
+
+ **Subject:** {subject} | **Difficulty Level:** {difficulty}
+
+ **Understanding the Concept:**
+ {topic_info['definition']}
+
+ **Key Learning Points:**
+ """
+         for concept in topic_info['key_concepts']:
+             response += f"• **{concept}:** Essential for comprehensive understanding\n"
+
+         response += f"""
+ **Practical Applications:**
+ {topic_info['applications']}
+
+ **Study Recommendations:**
+ • Review fundamental principles regularly
+ • Practice with diverse examples and problems
+ • Connect new learning to previous knowledge
+ • Don't hesitate to ask follow-up questions!
+
+ **💡 Learning Tip:** Break down complex topics into smaller parts and practice regularly!
+ """
+         return response
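
For reference, a minimal usage sketch of the class added in this commit (not part of the diff). It assumes the layout shown above, i.e. that core/ is an importable package containing both tutor_ai.py and the knowledge_math module it imports, and that torch and transformers are installed; the import path core.tutor_ai is inferred from the file location.

# Minimal usage sketch, under the assumptions stated above.
from core.tutor_ai import EduTutorAI

tutor = EduTutorAI()
loaded = tutor.load_model()  # True if Granite (or the GPT-2 fallback) loaded, False otherwise

# generate_response() routes the input: greetings get a canned reply, math-looking
# inputs go to MathSolver, and everything else goes to the text-generation pipeline,
# falling back to the KnowledgeBase-driven response if no model is available.
print(tutor.generate_response("solve 2*x + 3 = 7", subject="Mathematics", difficulty="Beginner"))
print(tutor.generate_response("Explain photosynthesis", subject="Biology"))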