"""
Main AI Tutor and Educational Features
"""

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import random
from .knowledge_math import KnowledgeBase, MathSolver

class EduTutorAI:
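    """Conversational tutor built on an IBM Granite instruct model (with a
    GPT-2 fallback), plus rule-based handling for greetings and math problems
    and a knowledge-base fallback when no text generator is available."""
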
    def __init__(self):
        self.model_name = "ibm-granite/granite-3.3-2b-instruct"
        self.tokenizer = None
        self.model = None
        self.text_generator = None
        self.knowledge_base = KnowledgeBase()
        self.math_solver = MathSolver()

    def load_model(self):
        """Load IBM Granite model with fallback"""
        try:
            print("🤖 Loading EduTutor AI model...")
            self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token

            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                low_cpu_mem_usage=True,
                device_map="auto" if torch.cuda.is_available() else None
            )

            self.text_generator = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None
            )

            print("✅ IBM Granite model loaded successfully!")
            return True

        except Exception as e:
            print(f"❌ Error loading model: {str(e)}")
            print("🔄 Trying GPT-2 fallback...")
            
            try:
                self.model_name = "gpt2"
                self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
                self.model = AutoModelForCausalLM.from_pretrained("gpt2")
                
                if self.tokenizer.pad_token is None:
                    self.tokenizer.pad_token = self.tokenizer.eos_token

                self.text_generator = pipeline("text-generation", model=self.model, tokenizer=self.tokenizer)
                print("✅ GPT-2 fallback loaded!")
                return True
            except Exception as e2:
                print(f"❌ Fallback failed: {str(e2)}")
                return False

    def is_greeting(self, text: str) -> bool:
        """Check if input is a greeting"""
        greetings = ['hello', 'hi', 'hey', 'good morning', 'good afternoon']
        return any(greeting in text.lower() for greeting in greetings)

    def is_math_problem(self, text: str) -> bool:
        """Check if input contains a math problem"""
        if self.math_solver.is_algebraic_equation(text):
            return True
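        # Broad heuristic: single-character indicators such as '-' and '(' also
        # appear in ordinary prose, so some non-math questions will match too.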
        math_indicators = ['+', '-', '*', '/', '(', ')', 'calculate', 'compute', 'solve']
        return any(indicator in text.lower() for indicator in math_indicators)

    def generate_response(self, user_input: str, subject: str = "General", difficulty: str = "Intermediate") -> str:
        """Main response generation method"""
        try:
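            # Route greetings and math problems to the rule-based handlers
            # first; only fall through to the language model for open questions.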
            if self.is_greeting(user_input):
                return self.generate_greeting_response()

            if self.is_math_problem(user_input):
                return self.solve_math_problem(user_input)

            if self.text_generator is not None:
                return self.generate_dynamic_response(user_input, subject, difficulty)
            else:
                return self.generate_fallback_response(user_input, subject, difficulty)

        except Exception as e:
            return self.generate_fallback_response(user_input, subject, difficulty)

    def generate_greeting_response(self) -> str:
        """Generate friendly greeting"""
        responses = [
            "Hello! I'm EduTutor AI, your personal learning assistant. What would you like to study today?",
            "Hi there! Welcome to EduTutor AI! I'm here to help you learn and grow. What can I help you with?",
            "Greetings! I'm ready to make learning fun and engaging. What topic interests you today?"
        ]
        return random.choice(responses)

    def solve_math_problem(self, problem: str) -> str:
        """Solve math problems"""
        try:
            if self.math_solver.is_algebraic_equation(problem):
                return self.math_solver.solve_algebraic_equation(problem)
            else:
                return self.math_solver.solve_arithmetic_expression(problem)
        except Exception as e:
            return f"**Math Problem Analysis**\n\n**Problem:** {problem}\n\n**Approach:** Identify problem type, apply appropriate methods, show steps, verify answer."

    def generate_dynamic_response(self, user_input: str, subject: str, difficulty: str) -> str:
        """Generate AI response"""
        try:
            prompt = f"""You are EduTutor AI, an expert educational assistant specializing in {subject}. 

Student Question: {user_input}
Subject: {subject}
Difficulty Level: {difficulty}

Provide a clear, educational response with explanations, key concepts, and study tips.

Educational Response:"""

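            # Sample up to 300 new tokens; temperature 0.7 trades a little
            # determinism for more natural, varied explanations.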
            response = self.text_generator(
                prompt,
                max_new_tokens=300,
                temperature=0.7,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id
            )

            generated_text = response[0]['generated_text']
            if "Educational Response:" in generated_text:
                ai_response = generated_text.split("Educational Response:")[-1].strip()
            else:
                ai_response = generated_text.replace(prompt, "").strip()

            formatted_response = f"**🎓 EduTutor AI Response**\n\n"
            formatted_response += f"**Question:** {user_input}\n"
            formatted_response += f"**Subject:** {subject} | **Level:** {difficulty}\n\n"
            formatted_response += f"**Answer:**\n{ai_response}\n\n"
            formatted_response += f"**💡 Study Tip:** Practice similar problems and ask follow-up questions!"

            return formatted_response

        except Exception as e:
            return self.generate_fallback_response(user_input, subject, difficulty)

    def generate_fallback_response(self, user_input: str, subject: str, difficulty: str) -> str:
        """Generate fallback response"""
        topic_info = self.knowledge_base.get_accurate_info(user_input, subject)

        response = f"""**🎓 Educational Response: {user_input}**

**Subject:** {subject} | **Difficulty Level:** {difficulty}

**Understanding the Concept:**
{topic_info['definition']}

**Key Learning Points:**
"""
        for concept in topic_info['key_concepts']:
            response += f"• **{concept}:** Essential for comprehensive understanding\n"

        response += f"""
**Practical Applications:**
{topic_info['applications']}

**Study Recommendations:**
• Review fundamental principles regularly
• Practice with diverse examples and problems
• Connect new learning to previous knowledge
• Don't hesitate to ask follow-up questions!

**💡 Learning Tip:** Break down complex topics into smaller parts and practice regularly!
"""
        return response
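

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes this file
# and knowledge_math.py live in a package named "edututor" and that this
# module is called tutor.py (both names are assumptions), so it should be run
# as `python -m edututor.tutor` for the relative import above to resolve.
# Model weights are downloaded from the Hugging Face Hub on first use.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tutor = EduTutorAI()
    if tutor.load_model():
        # Greeting: handled by the rule-based path, no model call needed.
        print(tutor.generate_response("Hello!"))
        # Math expression: routed to MathSolver.
        print(tutor.generate_response("Calculate 12 * (3 + 4)"))
        # Open question: answered by the text-generation pipeline.
        print(tutor.generate_response("Explain photosynthesis", subject="Biology", difficulty="Beginner"))
    else:
        print("Model could not be loaded; only knowledge-base fallback answers are available.")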