Delete walkthrough-enhancement.js
walkthrough-enhancement.js (DELETED, +0 -755)

@@ -1,755 +0,0 @@
// Complete Walkthrough Mode Implementation
// This replaces the incomplete walkthrough section in index.html

// Walkthrough Mode functionality
let walkthroughActive = false;
let walkthroughStep = 0;
let walkthroughTutorial = null;
let walkthroughTrainingSpeed = 2000; // Much slower training speed for walkthrough
let walkthroughInterval = null;
let walkthroughExplanationMode = false;

const walkthroughTutorials = {
    basics: {
        title: 'Neural Network Basics',
        task: 'and', // Use AND gate for demonstration
        steps: [
            {
                title: 'Welcome to Neural Networks! 🧠',
                content: 'Neural networks are inspired by the human brain. They consist of layers of interconnected neurons that process information. We\'ll use a simple AND gate to learn how they work step by step!',
                element: null,
                position: 'center',
                action: 'start'
            },
            {
                title: 'Input Layer (Blue Circles)',
                content: 'These blue circles are input neurons. For our AND gate, we have 2 inputs that can be either 0 or 1. Watch the numbers inside - they show the current input values being processed!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 50, y: 50, width: 100, height: 200},
                action: 'highlight_inputs'
            },
            {
                title: 'Hidden Layer (Green Circles)',
                content: 'These green neurons are the "thinking" layer. They receive signals from inputs, multiply them by weights, add biases, and decide how "excited" to get. The numbers show their activation levels!',
                element: '#networkCanvas',
                position: 'left',
                highlight: {x: 150, y: 50, width: 100, height: 200},
                action: 'highlight_hidden'
            },
            {
                title: 'Output Layer (Purple Circle)',
                content: 'This purple neuron gives us the final answer! For AND gate, it should output 1 only when BOTH inputs are 1. Right now it\'s making random guesses - that\'s why we need to train it!',
                element: '#networkCanvas',
                position: 'left',
                highlight: {x: 250, y: 120, width: 100, height: 60},
                action: 'highlight_output'
            },
            {
                title: 'Connections (Colored Lines)',
                content: 'These lines are connections with "weights". Green lines are positive (excite the next neuron), red lines are negative (inhibit it). Thicker lines = stronger connections. The AI learns by adjusting these!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'highlight_weights'
            },
            {
                title: 'Training Data',
                content: 'Here\'s our training data! Each card shows: input values → expected output. The AI will see these examples and learn the AND gate pattern. Notice the "Raw" prediction - it starts random!',
                element: '#taskOutput',
                position: 'top',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'highlight_data'
            },
            {
                title: 'Let\'s Start Training!',
                content: 'Now I\'ll start training VERY slowly so you can see every step. Watch how the neurons\' values change, connections strengthen/weaken, and the loss decreases as the AI learns!',
                element: '#trainBtn',
                position: 'top',
                highlight: {x: -5, y: -5, width: 'auto', height: 'auto'},
                action: 'start_training'
            }
        ]
    },
    training: {
        title: 'How Training Works',
        task: 'xor', // Use XOR for complexity
        steps: [
            {
                title: 'Welcome to AI Training! 🎯',
                content: 'Training is how AI learns! We\'ll use the XOR gate - a challenging problem that requires the AI to think in complex ways. Let\'s see how the network learns step by step.',
                element: null,
                position: 'center',
                action: 'start'
            },
            {
                title: 'Forward Propagation',
                content: 'Step 1: Forward pass! The input flows through the network like water through pipes. Each neuron receives inputs, multiplies by weights, adds bias, and applies activation. Watch the flow!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'demo_forward'
            },
            {
                title: 'Making a Prediction',
                content: 'The output neuron gives us a prediction! For XOR: 0,0→0, 0,1→1, 1,0→1, 1,1→0. Right now it\'s wrong - see the red "Wrong" status. The raw output should be close to the target!',
                element: '#taskOutput',
                position: 'top',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'show_prediction'
            },
            {
                title: 'Calculating Loss',
                content: 'Loss measures how wrong we are! It\'s the difference between prediction and target, squared. High loss = very wrong. Low loss = very right. Watch this number in the stats panel!',
                element: '#lossValue',
                position: 'bottom',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'show_loss'
            },
            {
                title: 'Backpropagation Magic',
                content: 'Step 2: Backward pass! The AI traces back through the network asking "who\'s responsible for this error?" and adjusts weights accordingly. This is backpropagation - the heart of deep learning!',
                element: '#networkCanvas',
                position: 'left',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'demo_backward'
            },
            {
                title: 'Weight Updates',
                content: 'The AI nudges each weight slightly in the direction that reduces error. Good connections get stronger, bad ones get weaker. Watch the line colors and thickness change!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'show_weight_updates'
            },
            {
                title: 'Learning Progress',
                content: 'Each training cycle is called an "epoch". Watch the loss chart - it should generally go down as the AI gets better! Sometimes it goes up temporarily - that\'s normal during learning.',
                element: '#lossChart',
                position: 'right',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'show_progress'
            },
            {
                title: 'Continuous Learning',
                content: 'Now watch it train continuously! Each step: forward pass → calculate loss → backward pass → update weights → repeat. This is how all AI learns, from image recognition to chatbots!',
                element: null,
                position: 'center',
                action: 'continuous_training'
            }
        ]
    },
    visualization: {
        title: 'Understanding the Visualizations',
        task: 'classification', // Use 2D classification for visual demo
        steps: [
            {
                title: 'Reading AI Visualizations 📊',
                content: 'Visualizations help us understand what\'s happening inside AI! We\'ll use 2D classification - separating red and blue points in space - to see how neural networks think.',
                element: null,
                position: 'center',
                action: 'start'
            },
            {
                title: 'Network Diagram - The Brain Map',
                content: 'This shows the AI\'s structure! Circles = neurons (processing units), Lines = connections (information pathways). Colors show activity levels: bright = excited, dark = calm.',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'explain_network'
            },
            {
                title: 'Data Visualization - The Problem Space',
                content: 'This 2D plot shows our data! Red dots should be classified as 0, blue dots as 1. The AI needs to learn an invisible boundary that separates them correctly.',
                element: '#dataViz',
                position: 'left',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'explain_data_viz'
            },
            {
                title: 'Training Progress Chart',
                content: 'The loss chart shows learning over time! Starting high (confused), gradually decreasing (getting smarter). Flat periods mean it\'s thinking, sharp drops mean breakthroughs!',
                element: '#lossChart',
                position: 'right',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'explain_loss_chart'
            },
            {
                title: 'Prediction Cards - The Report Card',
                content: 'Each card shows: the input → expected output. "Raw" = neuron\'s actual output (0-1), "Predicted" = final decision, Status = right/wrong. Green borders = correct predictions!',
                element: '#taskOutput',
                position: 'top',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'explain_predictions'
            },
            {
                title: 'Stats Panel - The Dashboard',
                content: 'Epochs = training cycles completed, Loss = how wrong we are, Accuracy = % correct predictions, Current = which example we\'re processing now. These tell the whole story!',
                element: '.stats-grid',
                position: 'bottom',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'explain_stats'
            },
            {
                title: 'Watching It All Together',
                content: 'Now watch everything update in harmony! Network neurons fire, data points get classified, loss decreases, accuracy increases. You\'re seeing the birth of artificial intelligence!',
                element: null,
                position: 'center',
                action: 'full_demo'
            }
        ]
    },
    logic: {
        title: 'Logic Gates Deep Dive',
        task: 'xor', // Start with XOR as the most interesting
        steps: [
            {
                title: 'Logic Gates - AI\'s Building Blocks 🔗',
                content: 'Logic gates are the foundation of all computing! We\'ll explore how neural networks learn AND, OR, and the famous XOR gate - the problem that sparked the deep learning revolution!',
                element: null,
                position: 'center',
                action: 'start'
            },
            {
                title: 'The XOR Challenge',
                content: 'XOR (exclusive or) outputs 1 when inputs are different. Seems simple, but it stumped early AI for decades! It\'s not "linearly separable" - you can\'t draw a straight line to separate the outputs.',
                element: '#taskOutput',
                position: 'top',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'explain_xor'
            },
            {
                title: 'Why XOR Needs Deep Networks',
                content: 'Look at our network: 2→12→8→1. We need these hidden layers! Each layer transforms the data, and together they can solve XOR. Single-layer networks can\'t do this!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 0, y: 0, width: 400, height: 300},
                action: 'explain_depth'
            },
            {
                title: 'Layer 1: Feature Detection',
                content: 'The first hidden layer (12 neurons) learns to detect patterns in the input. Some neurons might learn "both inputs high", others "both inputs low", etc. These become building blocks!',
                element: '#networkCanvas',
                position: 'left',
                highlight: {x: 100, y: 50, width: 80, height: 200},
                action: 'explain_layer1'
            },
            {
                title: 'Layer 2: Combination Logic',
                content: 'The second hidden layer (8 neurons) combines the features from layer 1. It might learn rules like "if feature A is active but feature B isn\'t, then activate". This creates complex logic!',
                element: '#networkCanvas',
                position: 'right',
                highlight: {x: 200, y: 80, width: 80, height: 140},
                action: 'explain_layer2'
            },
            {
                title: 'Output: The Final Decision',
                content: 'The output neuron combines all the complex features into a final decision. It learns to say "yes" (1) when the XOR pattern is detected, "no" (0) otherwise. Magic!',
                element: '#networkCanvas',
                position: 'left',
                highlight: {x: 300, y: 120, width: 80, height: 60},
                action: 'explain_output'
            },
            {
                title: 'Training Process',
                content: 'Watch as the network slowly learns XOR! Early on, it makes random guesses. Gradually, it discovers the pattern. You\'ll see the accuracy climb from 25% (random) to 100% (perfect)!',
                element: '#accuracyValue',
                position: 'bottom',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'demo_training'
            },
            {
                title: 'The Learning Moment',
                content: 'There\'s often a "eureka moment" where the AI suddenly "gets it" - loss drops rapidly, accuracy jumps! This is the network discovering the XOR pattern. It\'s like watching intelligence emerge!',
                element: '#lossChart',
                position: 'right',
                highlight: {x: -10, y: -10, width: 'auto', height: 'auto'},
                action: 'show_breakthrough'
            }
        ]
    }
};

// Walkthrough DOM elements
const walkthroughOverlay = document.getElementById('walkthroughOverlay');
const walkthroughHighlight = document.getElementById('walkthroughHighlight');
const walkthroughPopup = document.getElementById('walkthroughPopup');
const walkthroughTitle = document.getElementById('walkthroughTitle');
const walkthroughContent = document.getElementById('walkthroughContent');
const walkthroughProgress = document.getElementById('walkthroughProgress');
const walkthroughStepSpan = document.getElementById('walkthroughStep');
const walkthroughTotal = document.getElementById('walkthroughTotal');
const walkthroughIndicator = document.getElementById('walkthroughIndicator');
const walkthroughPrev = document.getElementById('walkthroughPrev');
const walkthroughNext = document.getElementById('walkthroughNext');
const walkthroughSkip = document.getElementById('walkthroughSkip');

// Start walkthrough function
function startWalkthrough(tutorialId) {
    const tutorial = walkthroughTutorials[tutorialId];
    if (!tutorial) return;

    walkthroughActive = true;
    walkthroughTutorial = tutorial;
    walkthroughStep = 0;
    walkthroughExplanationMode = true;

    // Load the tutorial task if specified
    if (tutorial.task) {
        currentCategory = 'fundamentals'; // Most tutorials use fundamentals
        selectTask(tutorial.task);
        // Wait a moment for task to load
        setTimeout(() => {
            showWalkthroughStep();
        }, 500);
    } else {
        showWalkthroughStep();
    }

    // Hide walkthrough mode menu
    document.getElementById('walkthroughMode').style.display = 'none';

    // Show indicator
    walkthroughIndicator.style.display = 'block';
}

// Show current walkthrough step
function showWalkthroughStep() {
    if (!walkthroughTutorial || walkthroughStep >= walkthroughTutorial.steps.length) {
        endWalkthrough();
        return;
    }

    const step = walkthroughTutorial.steps[walkthroughStep];

    // Update progress
    walkthroughProgress.style.display = 'block';
    walkthroughStepSpan.textContent = walkthroughStep + 1;
    walkthroughTotal.textContent = walkthroughTutorial.steps.length;

    // Update popup content
    walkthroughTitle.textContent = step.title;
    walkthroughContent.textContent = step.content;

    // Handle special actions
    if (step.action) {
        executeWalkthroughAction(step.action);
    }

    // Position popup and highlight
    if (step.element) {
        positionWalkthroughElements(step);
    } else {
        // Center popup for intro steps
        centerWalkthroughPopup();
        walkthroughHighlight.style.display = 'none';
    }

    // Show overlay and popup
    walkthroughOverlay.style.display = 'block';
    walkthroughPopup.style.display = 'block';

    // Update buttons
    walkthroughPrev.style.display = walkthroughStep > 0 ? 'block' : 'none';
    walkthroughNext.textContent = walkthroughStep === walkthroughTutorial.steps.length - 1 ? 'Finish' : 'Next';
}

// Position walkthrough elements
function positionWalkthroughElements(step) {
    const element = document.querySelector(step.element);
    if (!element) return;

    const rect = element.getBoundingClientRect();
    const popup = walkthroughPopup;

    // Position highlight
    if (step.highlight) {
        walkthroughHighlight.style.display = 'block';
        if (step.highlight.width === 'auto') {
            walkthroughHighlight.style.left = (rect.left - 10) + 'px';
            walkthroughHighlight.style.top = (rect.top - 10) + 'px';
            walkthroughHighlight.style.width = (rect.width + 20) + 'px';
            walkthroughHighlight.style.height = (rect.height + 20) + 'px';
        } else {
            walkthroughHighlight.style.left = (rect.left + step.highlight.x) + 'px';
            walkthroughHighlight.style.top = (rect.top + step.highlight.y) + 'px';
            walkthroughHighlight.style.width = step.highlight.width + 'px';
            walkthroughHighlight.style.height = step.highlight.height + 'px';
        }
    } else {
        walkthroughHighlight.style.display = 'none';
    }

    // Position popup based on position preference
    popup.className = 'walkthrough-popup ' + step.position;

    const popupRect = popup.getBoundingClientRect();
    let left, top;

    switch (step.position) {
        case 'top':
            left = rect.left + rect.width / 2 - popupRect.width / 2;
            top = rect.top - popupRect.height - 20;
            break;
        case 'bottom':
            left = rect.left + rect.width / 2 - popupRect.width / 2;
            top = rect.bottom + 20;
            break;
        case 'left':
            left = rect.left - popupRect.width - 20;
            top = rect.top + rect.height / 2 - popupRect.height / 2;
            break;
        case 'right':
            left = rect.right + 20;
            top = rect.top + rect.height / 2 - popupRect.height / 2;
            break;
        default:
            centerWalkthroughPopup();
            return;
    }

    // Keep popup on screen
    left = Math.max(10, Math.min(left, window.innerWidth - popupRect.width - 10));
    top = Math.max(10, Math.min(top, window.innerHeight - popupRect.height - 10));

    popup.style.left = left + 'px';
    popup.style.top = top + 'px';
}

// Center walkthrough popup
function centerWalkthroughPopup() {
    const popup = walkthroughPopup;
    popup.className = 'walkthrough-popup center';
    popup.style.left = '50%';
    popup.style.top = '50%';
    popup.style.transform = 'translate(-50%, -50%)';
}

// Execute special walkthrough actions
function executeWalkthroughAction(action) {
    switch (action) {
        case 'start':
            // Reset network and prepare for demonstration
            if (currentTask && network) {
                reset();
            }
            break;

        case 'start_training':
            // Start very slow training for educational purposes
            if (!isTraining && currentTask) {
                trainBtn.click();
            }
            break;

        case 'demo_forward':
            // Slow down even more to show forward propagation
            walkthroughTrainingSpeed = 3000;
            break;

        case 'demo_backward':
            // Highlight the backward pass concept
            addWalkthroughExplanation('Backpropagation traces the error backwards through each connection, calculating how much each weight contributed to the mistake.');
            break;

        case 'show_weight_updates':
            // Highlight weight changes
            addWalkthroughExplanation('Watch the connection lines change! Thicker lines = stronger weights, colors show positive (green) vs negative (red) influence.');
            break;

        case 'continuous_training':
            // Return to normal-ish speed but still educational
            walkthroughTrainingSpeed = 1000;
            break;

        case 'explain_network':
            // Add network explanation overlay
            addWalkthroughExplanation('Bright neurons are highly activated (excited), dark neurons are inactive (calm). The patterns show how information flows!');
            break;

        case 'explain_xor':
            // Special explanation for XOR challenge
            addWalkthroughExplanation('XOR is special! Unlike AND/OR, you cannot draw a single straight line to separate the correct outputs. This requires complex thinking!');
            break;

        case 'demo_training':
            // Start training and monitor for breakthrough moments
            if (!isTraining && currentTask) {
                trainBtn.click();
                monitorTrainingProgress();
            }
            break;
    }
}

// Add temporary explanation overlay
function addWalkthroughExplanation(text) {
    const explanation = document.createElement('div');
    explanation.style.cssText = `
        position: fixed;
        bottom: 100px;
        left: 50%;
        transform: translateX(-50%);
        background: rgba(16, 185, 129, 0.95);
        color: white;
        padding: 1rem 2rem;
        border-radius: 0.5rem;
        font-size: 0.9rem;
        max-width: 400px;
        text-align: center;
        z-index: 10003;
        animation: fadeInOut 4s ease-in-out;
    `;
    explanation.textContent = text;
    document.body.appendChild(explanation);

    setTimeout(() => {
        if (explanation.parentNode) {
            explanation.parentNode.removeChild(explanation);
        }
    }, 4000);
}

// Add CSS animation for explanations
const walkthroughStyle = document.createElement('style');
walkthroughStyle.textContent = `
    @keyframes fadeInOut {
        0% { opacity: 0; transform: translateX(-50%) translateY(20px); }
        10% { opacity: 1; transform: translateX(-50%) translateY(0); }
        90% { opacity: 1; transform: translateX(-50%) translateY(0); }
        100% { opacity: 0; transform: translateX(-50%) translateY(-20px); }
    }
`;
document.head.appendChild(walkthroughStyle);

// Monitor training for educational moments
function monitorTrainingProgress() {
    if (!walkthroughActive) return;

    const checkProgress = () => {
        if (!isTraining || !walkthroughActive) return;

        // Look for breakthrough moments (rapid loss decrease)
        if (lossHistory.length > 10) {
            const recent = lossHistory.slice(-5);
            const older = lossHistory.slice(-10, -5);
            const recentAvg = recent.reduce((a, b) => a + b) / recent.length;
            const olderAvg = older.reduce((a, b) => a + b) / older.length;

            if (olderAvg > 0.5 && recentAvg < 0.1) {
                addWalkthroughExplanation('🎉 Breakthrough moment! The AI just discovered the pattern - watch the loss plummet!');
            }
        }

        // Check for high accuracy achievements
        if (accuracy > 0.9 && epoch > 50) {
            addWalkthroughExplanation('🎯 Excellent! The AI has mastered this pattern with over 90% accuracy!');
        }

        setTimeout(checkProgress, 2000);
    };

    setTimeout(checkProgress, 5000);
}

// Navigation functions
function nextWalkthroughStep() {
    walkthroughStep++;
    showWalkthroughStep();
}

function prevWalkthroughStep() {
    if (walkthroughStep > 0) {
        walkthroughStep--;
        showWalkthroughStep();
    }
}

function endWalkthrough() {
    walkthroughActive = false;
    walkthroughTutorial = null;
    walkthroughStep = 0;
    walkthroughExplanationMode = false;
    walkthroughTrainingSpeed = 2000; // Reset to slow but not too slow

    // Hide walkthrough UI
    walkthroughOverlay.style.display = 'none';
    walkthroughPopup.style.display = 'none';
    walkthroughHighlight.style.display = 'none';
    walkthroughProgress.style.display = 'none';
    walkthroughIndicator.style.display = 'none';

    // Clear any intervals
    if (walkthroughInterval) {
        clearInterval(walkthroughInterval);
        walkthroughInterval = null;
    }

    // Show completion message
    addWalkthroughExplanation('🎓 Walkthrough complete! You now understand neural networks better. Try exploring other tasks!');

    // Return to normal training speed when walkthrough ends
    setTimeout(() => {
        walkthroughTrainingSpeed = 100; // Back to normal speed
    }, 5000);
}

// Event listeners for walkthrough
walkthroughNext.addEventListener('click', nextWalkthroughStep);
walkthroughPrev.addEventListener('click', prevWalkthroughStep);
walkthroughSkip.addEventListener('click', endWalkthrough);

// Keyboard navigation for walkthrough
document.addEventListener('keydown', (e) => {
    if (!walkthroughActive) return;

    if (e.key === 'ArrowRight' || e.key === ' ') {
        e.preventDefault();
        nextWalkthroughStep();
    } else if (e.key === 'ArrowLeft') {
        e.preventDefault();
        prevWalkthroughStep();
    } else if (e.key === 'Escape') {
        e.preventDefault();
        endWalkthrough();
    }
});

// Enhanced training step for walkthrough mode
function walkthroughTrainStep() {
    if (!walkthroughActive) {
        trainStep(); // Use normal training
        return;
    }

    // Detailed step-by-step training for education
    const sample = currentTask.data[currentSample];

    // 1. Forward propagation with detailed explanation
    const output = network.forward(sample.input);
    activations = [...network.activations];

    if (walkthroughExplanationMode && epoch % 10 === 0) {
        const explanation = `Step ${epoch}: Processing ${sample.label}. ` +
            `Input: [${sample.input.join(', ')}] → ` +
            `Hidden activations → ` +
            `Output: ${output[0].toFixed(3)} (target: ${sample.target[0]})`;
        console.log(explanation); // For developers who open console
    }

    // 2. Calculate loss with explanation
    const result = network.trainBatch([sample]);
    currentLoss = result.loss;
    weightChanges = result.weightChanges;

    // 3. Update tracking variables
    lossHistory.push(result.loss);
    if (lossHistory.length > 100) lossHistory.shift();

    // 4. Calculate predictions for all samples
    const newPredictions = currentTask.data.map(data => {
        const output = network.forward(data.input);
        const rawOutput = output[0];

        let predicted, correct;
        if (currentTask.isRegression) {
            predicted = rawOutput.toFixed(2);
            let tolerance = 0.1;

            if (currentTask.title.includes('Autoencoder')) {
                tolerance = 0.2;
                const allOutputs = network.forward(data.input);
                let totalError = 0;
                for (let j = 0; j < data.target.length; j++) {
                    totalError += Math.abs(allOutputs[j] - data.target[j]);
                }
                const avgError = totalError / data.target.length;
                correct = avgError < tolerance;
            } else {
                correct = Math.abs(rawOutput - data.target[0]) < tolerance;
            }
        } else {
            predicted = rawOutput >= 0.5 ? 1 : 0;
            const target = data.target[0];
            correct = predicted === target;
        }

        return {
            ...data,
            output: rawOutput,
            predicted: predicted,
            correct: correct
        };
    });
    predictions = newPredictions;

    // 5. Update metrics
    const correct = newPredictions.filter(p => p.correct).length;
    accuracy = correct / newPredictions.length;

    const totalLoss = newPredictions.reduce((sum, p) =>
        sum + Math.pow(p.target[0] - p.output, 2), 0) / newPredictions.length;
    avgLoss = totalLoss;

    currentSample = (currentSample + 1) % currentTask.data.length;
    epoch++;

    // 6. Update UI
    updateUI();

    // 7. Special walkthrough feedback
    if (walkthroughActive && epoch % 20 === 0) {
        const accuracyPercent = (accuracy * 100).toFixed(1);
        if (accuracy > 0.8) {
            addWalkthroughExplanation(`🎉 Great progress! ${accuracyPercent}% accuracy - the AI is learning the pattern!`);
        } else if (accuracy > 0.5) {
            addWalkthroughExplanation(`📈 Learning... ${accuracyPercent}% accuracy. Watch the weights adapt!`);
        }
    }
}

// Override the original trainStep function when in walkthrough mode
const originalTrainStep = trainStep;
trainStep = function() {
    if (walkthroughActive) {
        walkthroughTrainStep();
    } else {
        originalTrainStep();
    }
};

// Override training button behavior for walkthrough mode
const originalTrainBtnClick = trainBtn.onclick;
trainBtn.addEventListener('click', (e) => {
    if (walkthroughActive) {
        // Use much slower speed in walkthrough mode
        isTraining = !isTraining;

        if (isTraining) {
            trainBtn.innerHTML = `
                <svg class="icon" fill="currentColor" viewBox="0 0 24 24">
                    <path d="M6 19h4V5H6v14zm8-14v14h4V5h-4z"/>
                </svg>
                Pause Training
            `;
            trainBtn.className = 'btn btn-pause';

            trainInterval = setInterval(trainStep, walkthroughTrainingSpeed);
        } else {
            trainBtn.innerHTML = `
                <svg class="icon" fill="currentColor" viewBox="0 0 24 24">
                    <path d="M8 5v14l11-7z"/>
                </svg>
                Start Training
            `;
            trainBtn.className = 'btn btn-start';

            clearInterval(trainInterval);
        }

        e.stopPropagation();
        return false;
    }
});

console.log('🎓 Walkthrough mode enhanced! Users can now learn step-by-step how neural networks work.');
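The deleted file is written as a drop-in enhancement script: it imports nothing and relies on globals defined by the main page (for example network, currentTask, trainBtn, trainInterval, isTraining, lossHistory, accuracy, epoch, currentSample, selectTask, reset, trainStep, updateUI) plus the walkthrough DOM nodes it looks up by id. A minimal wiring sketch is shown below, assuming the script is loaded after the main application script; the menu button ids used here are hypothetical and only illustrate how startWalkthrough() might be invoked, they are not taken from index.html.

// Hypothetical wiring sketch (not part of the deleted file). Assumes
// walkthrough-enhancement.js is loaded after the main app script so that the
// globals and DOM elements it references already exist. The button ids below
// are illustrative assumptions.
document.addEventListener('DOMContentLoaded', () => {
    // Map assumed menu button ids to the tutorial keys defined in walkthroughTutorials.
    const tutorialButtons = {
        walkthroughBasicsBtn: 'basics',                 // "Neural Network Basics"
        walkthroughTrainingBtn: 'training',             // "How Training Works"
        walkthroughVisualizationBtn: 'visualization',   // "Understanding the Visualizations"
        walkthroughLogicBtn: 'logic'                    // "Logic Gates Deep Dive"
    };
    for (const [buttonId, tutorialId] of Object.entries(tutorialButtons)) {
        const btn = document.getElementById(buttonId);
        if (btn) {
            btn.addEventListener('click', () => startWalkthrough(tutorialId));
        }
    }
});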