{ "title": "Support Vector Regression Mastery: 100 MCQs", "description": "A comprehensive set of 100 multiple-choice questions designed to teach and test your understanding of Support Vector Regression (SVR), starting from fundamental concepts to advanced topics like kernels, hyperparameter tuning, epsilon-insensitive loss, and real-world scenarios.", "questions": [ { "id": 1, "questionText": "What is the main goal of Support Vector Regression (SVR)?", "options": [ "To classify data points", "To cluster similar data points", "To reduce dimensionality", "To predict a continuous target variable while ignoring small errors within a margin" ], "correctAnswerIndex": 3, "explanation": "SVR tries to fit a function within a tube (epsilon-insensitive margin) around the true target values, minimizing errors outside the tube." }, { "id": 2, "questionText": "In SVR, what does the epsilon (ε) parameter represent?", "options": [ "Regularization strength", "Width of the margin in which no penalty is given for errors", "Learning rate", "Kernel type" ], "correctAnswerIndex": 1, "explanation": "Epsilon defines a margin of tolerance where predictions within ε of the true value are not penalized." }, { "id": 3, "questionText": "Which kernel is commonly used in SVR for non-linear relationships?", "options": [ "All of the above", "Polynomial kernel", "Linear kernel", "RBF (Radial Basis Function) kernel" ], "correctAnswerIndex": 0, "explanation": "SVR can use linear, polynomial, or RBF kernels depending on the nature of the data." }, { "id": 4, "questionText": "Scenario: SVR applied to a dataset with non-linear trend. Linear kernel used. Observation?", "options": [ "Training error zero", "Epsilon ignored", "Model performs perfectly", "Model underfits, poor predictions" ], "correctAnswerIndex": 3, "explanation": "A linear kernel cannot capture non-linear relationships, leading to underfitting." }, { "id": 5, "questionText": "Scenario: SVR with RBF kernel applied. Observation: very high gamma. Effect?", "options": [ "Overfitting, model follows data too closely", "Epsilon ignored", "Underfitting", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "High gamma makes the kernel narrow, causing the model to fit noise and overfit the training data." }, { "id": 6, "questionText": "Scenario: SVR applied to dataset with features in different scales. Observation: model performs poorly. Reason?", "options": [ "Intercept missing", "Kernel type wrong", "Epsilon too high", "SVR is sensitive to feature scaling" ], "correctAnswerIndex": 3, "explanation": "SVR requires feature scaling (standardization/normalization) to perform correctly, especially with RBF or polynomial kernels." }, { "id": 7, "questionText": "Scenario: SVR applied with linear kernel and C too small. Observation?", "options": [ "Model overfits", "Model underfits, wide margin, many points outside epsilon", "Intercept removed", "Training error zero" ], "correctAnswerIndex": 1, "explanation": "Small C gives weak regularization, allowing a wide margin and underfitting the data." }, { "id": 8, "questionText": "Scenario: SVR applied with epsilon too large. Observation?", "options": [ "Kernel type irrelevant", "Model overfits", "Many predictions inside margin, poor accuracy", "C ignored" ], "correctAnswerIndex": 2, "explanation": "Large epsilon makes the model insensitive to small deviations, reducing accuracy." }, { "id": 9, "questionText": "Scenario: SVR with RBF kernel. Observation: gamma too small. 
Effect?", "options": [ "Overfits", "Epsilon ignored", "Model underfits, unable to capture complex patterns", "Training error zero" ], "correctAnswerIndex": 2, "explanation": "Small gamma produces a wide kernel, leading to underfitting and smooth predictions." }, { "id": 10, "questionText": "Scenario: SVR applied to dataset with outliers. Observation: model robust if epsilon-insensitive loss used. Why?", "options": [ "C is irrelevant", "Outliers always ignored", "Errors within epsilon not penalized, reducing influence of small deviations", "Kernel type changes automatically" ], "correctAnswerIndex": 2, "explanation": "Epsilon-insensitive loss ignores small deviations, making SVR less sensitive to minor noise." }, { "id": 11, "questionText": "Scenario: SVR applied on dataset with non-linear trends. Comparison: Linear vs RBF kernel. Observation?", "options": [ "Linear kernel always better", "Training error zero", "Epsilon irrelevant", "RBF performs better on non-linear data" ], "correctAnswerIndex": 3, "explanation": "RBF kernel can capture non-linear patterns, unlike linear kernel." }, { "id": 12, "questionText": "Scenario: SVR applied with C too high. Observation?", "options": [ "Epsilon ignored", "Kernel type irrelevant", "Overfitting, model tries to reduce training error aggressively", "Underfitting" ], "correctAnswerIndex": 2, "explanation": "High C penalizes errors heavily, making the model fit training points closely and overfit." }, { "id": 13, "questionText": "Scenario: SVR applied to dataset with standardized features. Observation: model performs well. Why?", "options": [ "Feature scaling ensures fair distance computation in kernel functions", "C irrelevant", "Epsilon ignored", "Intercept removed" ], "correctAnswerIndex": 0, "explanation": "Scaling is crucial because SVR uses distances (especially RBF/polynomial) which are affected by feature scales." }, { "id": 14, "questionText": "Scenario: SVR applied with polynomial kernel degree 3. Observation: model captures cubic trends. Limitation?", "options": [ "Underfits always", "Epsilon ignored", "May overfit if degree too high or C large", "Intercept removed" ], "correctAnswerIndex": 2, "explanation": "Higher-degree polynomial kernels can model complex trends but may overfit if hyperparameters not tuned." }, { "id": 15, "questionText": "Scenario: SVR applied with epsilon=0.1. Observation: residuals smaller than 0.1 ignored. Effect?", "options": [ "Overfits all points", "Model focuses only on significant errors, reducing sensitivity to noise", "Training error zero", "C irrelevant" ], "correctAnswerIndex": 1, "explanation": "Residuals within epsilon are not penalized, allowing robustness to small fluctuations." }, { "id": 16, "questionText": "Scenario: SVR applied to dataset with few samples and high dimensions. Observation: kernel choice critical. Why?", "options": [ "Linear kernel always overfits", "C too small", "Epsilon irrelevant", "High-dimensional kernels like RBF can overfit small samples" ], "correctAnswerIndex": 3, "explanation": "High-dimensional kernels can overfit small datasets; careful kernel selection is needed." }, { "id": 17, "questionText": "Scenario: SVR applied with RBF kernel. Observation: both C and gamma tuned via grid search. 
Purpose?", "options": [ "Always minimize training error", "Ignore epsilon", "Find optimal hyperparameters balancing bias and variance", "Remove intercept" ], "correctAnswerIndex": 2, "explanation": "Grid search helps select C and gamma that prevent under/overfitting and optimize generalization." }, { "id": 18, "questionText": "Scenario: SVR applied with very small epsilon. Observation?", "options": [ "C irrelevant", "Underfits", "Model tries to fit nearly all points, risk of overfitting", "Kernel type irrelevant" ], "correctAnswerIndex": 2, "explanation": "Small epsilon reduces tolerance, making SVR try to fit almost all data points, increasing risk of overfitting." }, { "id": 19, "questionText": "Scenario: SVR applied to dataset with noisy measurements. Observation: epsilon too small. Effect?", "options": [ "Underfits", "Training error zero", "Intercept ignored", "Model overfits noise" ], "correctAnswerIndex": 3, "explanation": "Small epsilon forces model to fit noisy points, reducing generalization." }, { "id": 20, "questionText": "Scenario: SVR with RBF kernel. Observation: gamma increased while C fixed. Effect?", "options": [ "Underfits", "Model captures fine patterns but may overfit", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 1, "explanation": "Higher gamma makes the kernel more sensitive, fitting data tightly and risking overfitting." }, { "id": 21, "questionText": "Scenario: SVR applied to dataset with outliers. Observation: large epsilon. Effect?", "options": [ "Model ignores small deviations and is robust to outliers", "Overfits outliers", "Underfits severely", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Large epsilon reduces sensitivity to small deviations, providing robustness to outliers." }, { "id": 22, "questionText": "Scenario: SVR applied with polynomial kernel degree=5, C and epsilon tuned. Observation?", "options": [ "Training error zero", "Model can capture complex non-linear trends with controlled overfitting", "Intercept removed", "Underfits always" ], "correctAnswerIndex": 1, "explanation": "Polynomial kernel allows modeling non-linear trends; tuning C and epsilon controls overfitting." }, { "id": 23, "questionText": "Scenario: SVR applied with linear kernel to a mostly linear dataset. Observation?", "options": [ "Model overfits", "Model performs well, simple and interpretable", "Epsilon irrelevant", "Underfits" ], "correctAnswerIndex": 1, "explanation": "Linear kernel is ideal for linear relationships, providing simplicity and interpretability." }, { "id": 24, "questionText": "Scenario: SVR applied to dataset with standardized features and epsilon=0.2. Observation?", "options": [ "C irrelevant", "Residuals within 0.2 are ignored, reducing sensitivity to minor noise", "Overfits all points", "Intercept removed" ], "correctAnswerIndex": 1, "explanation": "Epsilon-insensitive loss allows ignoring minor deviations, improving robustness." }, { "id": 25, "questionText": "Scenario: SVR applied to dataset with varying scales. Observation: without scaling, RBF kernel fails. Reason?", "options": [ "C irrelevant", "Linear kernel always fails", "Distance-based kernels are affected by feature scales", "Epsilon ignored" ], "correctAnswerIndex": 2, "explanation": "RBF kernel depends on Euclidean distance, so unscaled features distort similarity computation." }, { "id": 26, "questionText": "Scenario: SVR applied to stock price dataset with non-linear trends. Linear kernel used. 
Observation?", "options": [ "Overfits perfectly", "Underfits, poor predictions", "Intercept ignored", "Training error zero" ], "correctAnswerIndex": 1, "explanation": "Linear kernel cannot capture non-linear trends, leading to underfitting in stock price prediction." }, { "id": 27, "questionText": "Scenario: SVR applied with RBF kernel to housing dataset. Observation: gamma too high. Effect?", "options": [ "Overfitting, model captures noise", "Training error zero", "Epsilon ignored", "Underfitting" ], "correctAnswerIndex": 0, "explanation": "High gamma makes the kernel too narrow, causing overfitting to training data." }, { "id": 28, "questionText": "Scenario: SVR applied to dataset with features in different ranges. Observation: model performs poorly. Solution?", "options": [ "Use linear kernel only", "Standardize or normalize features", "Decrease C", "Increase epsilon" ], "correctAnswerIndex": 1, "explanation": "Scaling ensures fair distance calculation for kernel-based SVR models." }, { "id": 29, "questionText": "Scenario: SVR applied with small C. Observation?", "options": [ "Intercept ignored", "Training error zero", "Wide margin, underfitting", "Overfitting" ], "correctAnswerIndex": 2, "explanation": "Small C allows more points outside the margin, leading to underfitting." }, { "id": 30, "questionText": "Scenario: SVR applied with very large epsilon. Observation?", "options": [ "Training error zero", "Overfits noise", "C irrelevant", "Model ignores small deviations, accuracy drops" ], "correctAnswerIndex": 3, "explanation": "Large epsilon reduces sensitivity to deviations, decreasing accuracy." }, { "id": 31, "questionText": "Scenario: SVR applied with polynomial kernel degree=3. Observation: overfitting on small dataset. Solution?", "options": [ "Ignore epsilon", "Increase gamma", "Reduce degree or tune C and epsilon", "Remove kernel" ], "correctAnswerIndex": 2, "explanation": "Reducing polynomial degree or tuning hyperparameters prevents overfitting on small datasets." }, { "id": 32, "questionText": "Scenario: SVR applied with RBF kernel. Observation: small gamma. Effect?", "options": [ "Overfits", "Training error zero", "Intercept ignored", "Model underfits, too smooth predictions" ], "correctAnswerIndex": 3, "explanation": "Small gamma creates a wide kernel, unable to capture complex patterns, leading to underfitting." }, { "id": 33, "questionText": "Scenario: SVR applied to time-series dataset. Observation: epsilon too small. Effect?", "options": [ "Training error zero", "Intercept ignored", "Underfits", "Model overfits minor fluctuations" ], "correctAnswerIndex": 3, "explanation": "Small epsilon forces the model to fit almost all data points, including noise." }, { "id": 34, "questionText": "Scenario: SVR applied with RBF kernel to noisy dataset. Observation: large epsilon. Effect?", "options": [ "Underfits severely", "Model ignores small deviations, robust to noise", "Overfits noise", "Training error zero" ], "correctAnswerIndex": 1, "explanation": "Epsilon-insensitive loss ignores minor deviations, reducing sensitivity to noise." }, { "id": 35, "questionText": "Scenario: SVR applied with cross-validation on C and gamma. Purpose?", "options": [ "Always minimize training error", "Find optimal hyperparameters to balance bias and variance", "Remove intercept", "Ignore epsilon" ], "correctAnswerIndex": 1, "explanation": "Cross-validation selects the best C and gamma values to improve generalization and prevent over/underfitting." 
}, { "id": 36, "questionText": "Scenario: SVR applied to housing dataset with RBF kernel. Observation: predictions very smooth. Reason?", "options": [ "Kernel linear", "Gamma too small, wide kernel", "Epsilon too small", "C too high" ], "correctAnswerIndex": 1, "explanation": "Small gamma causes the kernel to be wide, resulting in smooth underfitting predictions." }, { "id": 37, "questionText": "Scenario: SVR applied to dataset with standardized features. Observation: improved performance. Why?", "options": [ "C irrelevant", "Feature scaling ensures fair distance computation in kernel", "Epsilon ignored", "Intercept removed" ], "correctAnswerIndex": 1, "explanation": "Scaling is necessary because kernel functions depend on feature distances." }, { "id": 38, "questionText": "Scenario: SVR applied with RBF kernel. Observation: overfitting. Recommended action?", "options": [ "Decrease gamma or C, increase epsilon", "Increase gamma", "Remove kernel", "Decrease epsilon only" ], "correctAnswerIndex": 0, "explanation": "Reducing gamma or C and increasing epsilon reduces overfitting by simplifying the model." }, { "id": 39, "questionText": "Scenario: SVR applied to dataset with outliers. Observation: model robust. Reason?", "options": [ "Epsilon-insensitive loss ignores small deviations", "C too high", "Gamma too small", "Kernel linear" ], "correctAnswerIndex": 0, "explanation": "Epsilon-insensitive loss reduces impact of small errors, making SVR robust to noise/outliers." }, { "id": 40, "questionText": "Scenario: SVR applied with polynomial kernel degree 5, small dataset. Observation: overfitting. Solution?", "options": [ "Ignore epsilon", "Increase gamma", "Reduce degree or tune C and epsilon", "Remove kernel" ], "correctAnswerIndex": 2, "explanation": "High-degree polynomial can overfit; tuning reduces complexity and improves generalization." }, { "id": 41, "questionText": "Scenario: SVR applied to financial dataset. Observation: linear kernel performs well. Reason?", "options": [ "Linear kernel always best", "C irrelevant", "Data has mostly linear relationship", "Epsilon ignored" ], "correctAnswerIndex": 2, "explanation": "Linear kernel suffices if the underlying relationship is mostly linear." }, { "id": 42, "questionText": "Scenario: SVR applied with large epsilon. Observation: residuals within margin ignored. Effect?", "options": [ "Model ignores small deviations, reduces sensitivity to noise", "Overfits minor fluctuations", "Underfits severely", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Residuals within epsilon are not penalized, making SVR robust to minor deviations." }, { "id": 43, "questionText": "Scenario: SVR applied to time-series with RBF kernel. Observation: gamma too high. Effect?", "options": [ "Overfits, model fits noise", "Underfits", "Training error zero", "Epsilon ignored" ], "correctAnswerIndex": 0, "explanation": "High gamma makes kernel very narrow, overfitting small fluctuations in time-series." }, { "id": 44, "questionText": "Scenario: SVR applied to dataset with 50 features, 200 samples. Observation: gamma and C tuned via grid search. Advantage?", "options": [ "Remove intercept", "Training error minimized only", "Optimal bias-variance tradeoff", "Ignore epsilon" ], "correctAnswerIndex": 2, "explanation": "Grid search finds hyperparameters that optimize generalization." }, { "id": 45, "questionText": "Scenario: SVR applied to dataset with noise. Observation: small epsilon. 
Effect?", "options": [ "Underfits", "Overfits noise, poor generalization", "C irrelevant", "Intercept removed" ], "correctAnswerIndex": 1, "explanation": "Small epsilon reduces tolerance, forcing the model to fit almost all points, including noise." }, { "id": 46, "questionText": "Scenario: SVR applied with linear kernel to mostly linear dataset. Observation?", "options": [ "Epsilon ignored", "Underfits", "Good performance, simple and interpretable", "Overfits" ], "correctAnswerIndex": 2, "explanation": "Linear kernel works well for mostly linear relationships." }, { "id": 47, "questionText": "Scenario: SVR applied with RBF kernel, standardized features, tuned C and gamma. Observation?", "options": [ "Overfits always", "Model captures non-linear trends accurately", "Intercept ignored", "Underfits always" ], "correctAnswerIndex": 1, "explanation": "RBF kernel with proper tuning captures non-linear trends effectively." }, { "id": 48, "questionText": "Scenario: SVR applied with polynomial kernel degree 4. Observation: training error very low but test error high. Reason?", "options": [ "C too small", "Overfitting due to high-degree polynomial", "Underfitting", "Epsilon too large" ], "correctAnswerIndex": 1, "explanation": "High-degree polynomial can fit training data too closely, leading to overfitting." }, { "id": 49, "questionText": "Scenario: SVR applied to dataset with features of different scales. Observation: model poor. Solution?", "options": [ "Decrease gamma", "Increase epsilon", "Standardize features", "Use linear kernel only" ], "correctAnswerIndex": 2, "explanation": "Feature scaling is essential because SVR uses distances in kernel computation." }, { "id": 50, "questionText": "Scenario: SVR applied to stock market dataset with RBF kernel. Observation: epsilon-insensitive tube too wide. Effect?", "options": [ "C irrelevant", "Training error zero", "Model overfits", "Many small deviations ignored, poor accuracy" ], "correctAnswerIndex": 3, "explanation": "Wide epsilon tube ignores small deviations, reducing prediction accuracy." }, { "id": 51, "questionText": "Scenario: SVR applied to high-frequency stock price data. Observation: RBF kernel with very small gamma. Effect?", "options": [ "Epsilon ignored", "Training error zero", "Overfits noise", "Underfits, fails to capture rapid changes" ], "correctAnswerIndex": 3, "explanation": "Small gamma produces a wide kernel, smoothing predictions and missing rapid fluctuations." }, { "id": 52, "questionText": "Scenario: SVR applied to real estate dataset. Observation: polynomial kernel degree 6 overfits. Solution?", "options": [ "Remove kernel", "Reduce polynomial degree or tune C and epsilon", "Increase epsilon only", "Increase gamma" ], "correctAnswerIndex": 1, "explanation": "High-degree polynomials can model noise; lowering degree or tuning parameters prevents overfitting." }, { "id": 53, "questionText": "Scenario: SVR applied with RBF kernel, large epsilon, and small C. Observation?", "options": [ "Overfits", "Model underfits, ignores small deviations, wide margin", "Intercept ignored", "Training error zero" ], "correctAnswerIndex": 1, "explanation": "Small C and large epsilon reduce sensitivity to errors, causing underfitting." }, { "id": 54, "questionText": "Scenario: SVR applied to noisy sensor dataset. Observation: small epsilon, large C. 
Effect?", "options": [ "Kernel ignored", "Underfits", "Training error zero", "Overfits noise, poor generalization" ], "correctAnswerIndex": 3, "explanation": "Small epsilon and large C force the model to fit almost all points, including noise." }, { "id": 55, "questionText": "Scenario: SVR applied with RBF kernel on financial data. Observation: gamma tuned via cross-validation. Purpose?", "options": [ "Ignore epsilon", "Always minimize training error", "Balance bias and variance for better generalization", "Remove intercept" ], "correctAnswerIndex": 2, "explanation": "Cross-validation helps find gamma that prevents under/overfitting and improves prediction on unseen data." }, { "id": 56, "questionText": "Scenario: SVR applied to a dataset with highly correlated features. Observation: performance similar across linear and RBF kernels. Reason?", "options": [ "C too small", "Epsilon ignored", "Data relationship mostly linear", "Linear kernel always best" ], "correctAnswerIndex": 2, "explanation": "When features have linear relationships, both linear and RBF kernels give similar performance." }, { "id": 57, "questionText": "Scenario: SVR applied to dataset with extreme outliers. Observation: large epsilon. Effect?", "options": [ "Overfits outliers", "Training error zero", "Underfits completely", "Reduces sensitivity to outliers, robust predictions" ], "correctAnswerIndex": 3, "explanation": "Large epsilon ignores small deviations, reducing outlier influence." }, { "id": 58, "questionText": "Scenario: SVR applied with polynomial kernel degree 5. Observation: low-degree coefficients dominate. Reason?", "options": [ "Epsilon ignored", "Overfits noise", "High-degree terms penalized by regularization, low-degree terms capture main trend", "Training error zero" ], "correctAnswerIndex": 2, "explanation": "Regularization and parameter tuning often shrink high-degree polynomial effects, letting low-degree terms dominate." }, { "id": 59, "questionText": "Scenario: SVR applied to dataset with 50 features and 200 samples. Observation: gamma and C tuned via grid search. Advantage?", "options": [ "Optimizes bias-variance tradeoff", "Epsilon irrelevant", "Always minimize training error", "Removes intercept" ], "correctAnswerIndex": 0, "explanation": "Grid search selects hyperparameters that generalize best to unseen data." }, { "id": 60, "questionText": "Scenario: SVR applied to time-series dataset with RBF kernel. Observation: gamma high, C high, epsilon small. Effect?", "options": [ "Overfits training data, poor generalization", "Intercept irrelevant", "Underfits", "Residuals ignored" ], "correctAnswerIndex": 0, "explanation": "High gamma and C with small epsilon make SVR fit almost all points, including noise." }, { "id": 61, "questionText": "Scenario: SVR applied to housing data with standardized features. Observation: performance improved. Reason?", "options": [ "Linear kernel preferred", "Kernel distances computed correctly after standardization", "C irrelevant", "Epsilon ignored" ], "correctAnswerIndex": 1, "explanation": "Standardization ensures kernel computations are not skewed by feature scales." }, { "id": 62, "questionText": "Scenario: SVR applied to stock dataset. Observation: predictions smooth, underfitting. Likely cause?", "options": [ "Training error zero", "C too high", "Kernel linear", "Gamma too small or epsilon too large" ], "correctAnswerIndex": 3, "explanation": "Small gamma or large epsilon leads to overly smooth predictions, missing complex trends." 
}, { "id": 63, "questionText": "Scenario: SVR applied to dataset with missing values. Observation: training fails. Solution?", "options": [ "Impute or remove missing values", "Decrease C", "Reduce epsilon", "Change kernel" ], "correctAnswerIndex": 0, "explanation": "SVR cannot handle missing values; preprocessing is required." }, { "id": 64, "questionText": "Scenario: SVR applied to dataset with outliers. Observation: small epsilon, high C. Effect?", "options": [ "Underfits", "Overfits outliers", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 1, "explanation": "Small epsilon and high C force the model to fit all points, including outliers." }, { "id": 65, "questionText": "Scenario: SVR applied with linear kernel to mostly linear data. Observation: performance comparable to RBF. Reason?", "options": [ "C too small", "Epsilon ignored", "Linear relationship dominant in data", "Linear kernel always best" ], "correctAnswerIndex": 2, "explanation": "If the underlying trend is linear, linear and RBF kernels give similar results." }, { "id": 66, "questionText": "Scenario: SVR applied with polynomial kernel degree 4 on small dataset. Observation: overfitting. Solution?", "options": [ "Remove kernel", "Increase gamma", "Ignore epsilon", "Reduce degree or tune C and epsilon" ], "correctAnswerIndex": 3, "explanation": "High-degree polynomial can overfit small datasets; tuning reduces complexity." }, { "id": 67, "questionText": "Scenario: SVR applied to dataset with noisy features. Observation: epsilon-insensitive tube helps. Effect?", "options": [ "Training error zero", "Overfits all points", "Model robust to minor noise, reduces variance", "Intercept ignored" ], "correctAnswerIndex": 2, "explanation": "Epsilon-insensitive loss ignores small deviations, improving robustness." }, { "id": 68, "questionText": "Scenario: SVR applied to real estate dataset. Observation: small gamma, large epsilon. Effect?", "options": [ "Underfitting, predictions too smooth", "Training error zero", "Overfitting", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Small gamma widens the kernel, large epsilon ignores deviations, producing smooth underfit predictions." }, { "id": 69, "questionText": "Scenario: SVR applied to dataset with highly non-linear trends. Observation: RBF kernel tuned well. Effect?", "options": [ "Training error zero", "Captures non-linear patterns accurately", "Underfits", "Intercept ignored" ], "correctAnswerIndex": 1, "explanation": "Properly tuned RBF kernel models non-linear trends effectively." }, { "id": 70, "questionText": "Scenario: SVR applied to time-series dataset. Observation: predictions lag behind sudden spikes. Likely cause?", "options": [ "Kernel linear", "Epsilon too large, gamma too small", "C too high", "Training error zero" ], "correctAnswerIndex": 1, "explanation": "Large epsilon ignores small deviations; small gamma smooths predictions, causing lag." }, { "id": 71, "questionText": "Scenario: SVR applied to standardized financial dataset. Observation: model captures trends well. Reason?", "options": [ "Scaling ensures kernel distances computed correctly", "Linear kernel preferred", "Epsilon ignored", "C irrelevant" ], "correctAnswerIndex": 0, "explanation": "Standardized features prevent distance distortions in kernel computations." }, { "id": 72, "questionText": "Scenario: SVR applied to dataset with high-dimensional features and few samples. Observation: RBF kernel overfits. 
Solution?", "options": [ "Increase epsilon", "Ignore feature scaling", "Decrease C", "Reduce gamma or use linear kernel" ], "correctAnswerIndex": 3, "explanation": "High-dimensional kernels can overfit small datasets; reducing complexity helps." }, { "id": 73, "questionText": "Scenario: SVR applied to housing dataset with polynomial kernel degree 3. Observation: small epsilon improves robustness. Reason?", "options": [ "Epsilon balances sensitivity to minor deviations", "Overfits all points", "C irrelevant", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Appropriate epsilon allows ignoring small noise while fitting main trends." }, { "id": 74, "questionText": "Scenario: SVR applied with grid search for C, gamma, epsilon. Observation: selected parameters give best validation performance. Benefit?", "options": [ "Optimal hyperparameters improve generalization", "Always minimize training error", "Intercept removed", "Kernel linear" ], "correctAnswerIndex": 0, "explanation": "Grid search helps select parameters that balance bias and variance for unseen data." }, { "id": 75, "questionText": "Scenario: SVR applied to financial dataset with RBF kernel. Observation: small epsilon, high C, high gamma. Effect?", "options": [ "Overfits training data, poor generalization", "Underfits", "Residuals ignored", "Intercept irrelevant" ], "correctAnswerIndex": 0, "explanation": "Small epsilon, high C, and high gamma make the model fit almost all points, including noise, causing overfitting." }, { "id": 76, "questionText": "Scenario: SVR applied to high-frequency trading data. Observation: RBF kernel, gamma extremely high. Effect?", "options": [ "Overfits to noise, poor generalization", "Underfits trends", "Training error zero", "Epsilon ignored" ], "correctAnswerIndex": 0, "explanation": "Very high gamma makes the kernel very narrow, fitting noise and causing overfitting." }, { "id": 77, "questionText": "Scenario: SVR applied to housing dataset with polynomial kernel degree=7. Observation: model unstable. Solution?", "options": [ "Reduce polynomial degree or tune C and epsilon", "Increase gamma", "Remove kernel", "Increase epsilon only" ], "correctAnswerIndex": 0, "explanation": "High-degree polynomials overfit and produce instability; reducing degree or tuning hyperparameters stabilizes predictions." }, { "id": 78, "questionText": "Scenario: SVR applied to dataset with missing features. Observation: model fails. Solution?", "options": [ "Impute or remove missing values", "Decrease epsilon", "Change kernel", "Decrease C" ], "correctAnswerIndex": 0, "explanation": "SVR cannot handle missing values; preprocessing is required." }, { "id": 79, "questionText": "Scenario: SVR applied to noisy stock market data. Observation: small epsilon, high C. Effect?", "options": [ "Overfits noise, poor generalization", "Underfits trends", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Small epsilon and high C force the model to fit almost all points, including noise." }, { "id": 80, "questionText": "Scenario: SVR applied with RBF kernel, gamma too small, epsilon too large. Observation?", "options": [ "Underfits, predictions too smooth", "Overfits", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Small gamma and large epsilon make the model insensitive, producing overly smooth predictions." }, { "id": 81, "questionText": "Scenario: SVR applied to real estate dataset. Observation: grid search used for C, gamma, epsilon. 
Benefit?", "options": [ "Optimal hyperparameters balance bias and variance", "Always minimize training error", "Remove intercept", "Kernel linear" ], "correctAnswerIndex": 0, "explanation": "Grid search finds the best combination of hyperparameters to generalize well on unseen data." }, { "id": 82, "questionText": "Scenario: SVR applied to dataset with standardized features. Observation: model improves. Reason?", "options": [ "Kernel distances computed correctly after scaling", "Linear kernel preferred", "Epsilon ignored", "C irrelevant" ], "correctAnswerIndex": 0, "explanation": "Standardization prevents skewing of kernel distance calculations." }, { "id": 83, "questionText": "Scenario: SVR applied to time-series dataset. Observation: model lags sudden spikes. Likely cause?", "options": [ "Epsilon too large or gamma too small", "C too high", "Kernel linear", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Large epsilon and small gamma smooth predictions, causing lag behind rapid changes." }, { "id": 84, "questionText": "Scenario: SVR applied to small dataset with high-dimensional features. Observation: RBF kernel overfits. Solution?", "options": [ "Reduce gamma or use linear kernel", "Increase epsilon", "Decrease C", "Ignore feature scaling" ], "correctAnswerIndex": 0, "explanation": "High-dimensional kernels can overfit small datasets; reducing complexity helps." }, { "id": 85, "questionText": "Scenario: SVR applied with polynomial kernel degree=4. Observation: training error low, test error high. Reason?", "options": [ "Overfitting due to high-degree polynomial", "Underfitting", "Epsilon too large", "C too small" ], "correctAnswerIndex": 0, "explanation": "High-degree polynomials fit training data too closely, causing poor generalization." }, { "id": 86, "questionText": "Scenario: SVR applied to financial dataset with RBF kernel. Observation: small epsilon, high C, moderate gamma. Effect?", "options": [ "Model fits closely to significant deviations, balances noise", "Underfits", "Overfits all points", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Small epsilon and high C make SVR sensitive to important deviations while ignoring minor noise." }, { "id": 87, "questionText": "Scenario: SVR applied with RBF kernel. Observation: gamma increases, epsilon constant. Effect?", "options": [ "Model becomes more sensitive to small patterns, may overfit", "Underfits", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Increasing gamma narrows the kernel, increasing sensitivity to variations and risk of overfitting." }, { "id": 88, "questionText": "Scenario: SVR applied to dataset with noise. Observation: increasing epsilon. Effect?", "options": [ "Model ignores minor deviations, improves robustness", "Overfits all points", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Higher epsilon creates a wider tube, reducing sensitivity to noise." }, { "id": 89, "questionText": "Scenario: SVR applied with polynomial kernel. Observation: higher-degree terms dominate predictions. Effect?", "options": [ "May overfit, complex curve", "Underfits", "Training error zero", "Epsilon ignored" ], "correctAnswerIndex": 0, "explanation": "High-degree polynomial terms can produce complex predictions and overfitting." }, { "id": 90, "questionText": "Scenario: SVR applied to dataset with large number of outliers. Observation: large epsilon. 
Effect?", "options": [ "Model ignores small deviations, robust to noise", "Overfits outliers", "Underfits completely", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Large epsilon reduces sensitivity to minor deviations, improving robustness against noise and outliers." }, { "id": 91, "questionText": "Scenario: SVR applied to financial time-series dataset. Observation: gamma and C tuned via grid search. Advantage?", "options": [ "Optimal hyperparameters improve generalization", "Always minimize training error", "Intercept removed", "Kernel linear" ], "correctAnswerIndex": 0, "explanation": "Grid search balances bias and variance by selecting optimal hyperparameters." }, { "id": 92, "questionText": "Scenario: SVR applied to dataset with standardized features. Observation: model improves. Reason?", "options": [ "Kernel distances computed correctly after scaling", "Linear kernel preferred", "Epsilon ignored", "C irrelevant" ], "correctAnswerIndex": 0, "explanation": "Feature scaling ensures proper kernel distance calculations." }, { "id": 93, "questionText": "Scenario: SVR applied to time-series dataset. Observation: epsilon too small, C high. Effect?", "options": [ "Overfits minor fluctuations", "Underfits", "Residuals ignored", "Intercept irrelevant" ], "correctAnswerIndex": 0, "explanation": "Small epsilon and high C force SVR to fit almost all points, including noise." }, { "id": 94, "questionText": "Scenario: SVR applied to dataset with high-dimensional features. Observation: linear kernel better than RBF. Reason?", "options": [ "RBF overfits due to limited samples", "Linear kernel always better", "Epsilon ignored", "C too small" ], "correctAnswerIndex": 0, "explanation": "High-dimensional RBF kernels can overfit when sample size is small; linear kernel is safer." }, { "id": 95, "questionText": "Scenario: SVR applied with RBF kernel, large epsilon, small C. Observation?", "options": [ "Underfits, wide margin, ignores minor deviations", "Overfits", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Large epsilon and small C reduce sensitivity to errors, leading to underfitting." }, { "id": 96, "questionText": "Scenario: SVR applied to small dataset with polynomial kernel degree 5. Observation: overfitting. Solution?", "options": [ "Reduce degree or tune C and epsilon", "Increase gamma", "Ignore epsilon", "Remove kernel" ], "correctAnswerIndex": 0, "explanation": "High-degree polynomial can overfit; tuning reduces complexity and improves generalization." }, { "id": 97, "questionText": "Scenario: SVR applied to financial data. Observation: epsilon-insensitive tube reduces minor prediction errors. Benefit?", "options": [ "Robustness to noise, better generalization", "Overfits all points", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "Epsilon-insensitive loss ignores minor deviations, enhancing robustness." }, { "id": 98, "questionText": "Scenario: SVR applied with RBF kernel. Observation: gamma too high, epsilon small. Effect?", "options": [ "Overfits noise, poor generalization", "Underfits", "Training error zero", "Intercept ignored" ], "correctAnswerIndex": 0, "explanation": "High gamma and small epsilon make SVR fit closely to every data point, including noise." }, { "id": 99, "questionText": "Scenario: SVR applied to housing dataset with high feature correlation. Observation: performance similar for linear and RBF kernels. 
Reason?", "options": [ "Dominant linear relationships reduce kernel differences", "Linear kernel always better", "Epsilon ignored", "C too small" ], "correctAnswerIndex": 0, "explanation": "If relationships are mostly linear, both linear and RBF kernels perform similarly." }, { "id": 100, "questionText": "Scenario: SVR applied to stock price dataset. Observation: gamma, C, epsilon tuned optimally via grid search. Result?", "options": [ "Accurate predictions, optimal bias-variance tradeoff", "Underfits all trends", "Overfits all points", "Training error zero" ], "correctAnswerIndex": 0, "explanation": "Optimal hyperparameter tuning via grid search ensures good generalization and accurate predictions." } ] }