Build error
Update app.py
app.py CHANGED

@@ -15,154 +15,8 @@ def get_popular_tickers():
         "JNJ", "V", "PG", "WMT", "BAC", "DIS", "NFLX", "INTC"
     ]
 
-
-    try:
-        # Make sure the parameters are integers
-        train_data_points = int(train_data_points)
-        prediction_days = int(prediction_days)
-
-        # Set up the pipeline
-        pipeline = ChronosPipeline.from_pretrained(
-            "amazon/chronos-t5-mini",
-            device_map="cpu",
-            torch_dtype=torch.float32
-        )
-
-        # Fetch the maximum amount of data available
-        stock = yf.Ticker(ticker)
-        hist = stock.history(period="max")
-        stock_prices = hist[['Close']].reset_index()
-        df = stock_prices.rename(columns={'Date': 'Date', 'Close': f'{ticker}_Close'})
-
-        total_points = len(df)
-
-        # Make sure the number of training points does not exceed the total available
-        train_data_points = min(train_data_points, total_points)
-
-        # Build the training context
-        context = torch.tensor(df[f'{ticker}_Close'][:train_data_points].values, dtype=torch.float32)
-
-        # Run the prediction
-        forecast = pipeline.predict(context, prediction_days, limit_prediction_length=False)
-        low, median, high = np.quantile(forecast[0].numpy(), [0.01, 0.5, 0.99], axis=0)
-
-        plt.figure(figsize=(20, 10))
-        plt.clf()
-
-        # Determine the date range to show in the chart
-        context_days = min(10, train_data_points)
-        start_index = max(0, train_data_points - context_days)
-        end_index = min(train_data_points + prediction_days, total_points)
-
-        # Plot historical data, including real data after the training window
-        historical_dates = df['Date'][start_index:end_index]
-        historical_data = df[f'{ticker}_Close'][start_index:end_index].values
-        plt.plot(historical_dates,
-                 historical_data,
-                 color='blue',
-                 linewidth=2,
-                 label='Datos Reales')
-
-        # Build the prediction dates using business days only
-        if train_data_points < total_points:
-            # There is more data after the training window
-            prediction_start_date = df['Date'].iloc[train_data_points]
-        else:
-            # We are at the last point, so generate future dates
-            last_date = df['Date'].iloc[-1]
-            prediction_start_date = last_date + pd.Timedelta(days=1)
-
-        # Generate prediction dates on business days only
-        prediction_dates = pd.date_range(start=prediction_start_date, periods=prediction_days, freq='B')
-
-        # Plot the prediction
-        plt.plot(prediction_dates,
-                 median,
-                 color='black',
-                 linewidth=2,
-                 linestyle='-',
-                 label='Predicción')
-
-        # Confidence band
-        plt.fill_between(prediction_dates, low, high,
-                         color='gray', alpha=0.2,
-                         label='Intervalo de Confianza')
-
-        # Compute metrics if there is real data to compare against
-        overlap_end_index = train_data_points + prediction_days
-        if overlap_end_index <= total_points:
-            real_future_dates = df['Date'][train_data_points:overlap_end_index]
-            real_future_data = df[f'{ticker}_Close'][train_data_points:overlap_end_index].values
-
-            # Make sure the prediction dates and the real dates match
-            matching_dates = real_future_dates[real_future_dates.isin(prediction_dates)]
-            matching_indices = matching_dates.index - train_data_points
-            plt.plot(matching_dates,
-                     real_future_data[matching_indices],
-                     color='red',
-                     linewidth=2,
-                     linestyle='--',
-                     label='Datos Reales de Validación')
-
-            # Keep only the predictions that match the real dates
-            predicted_data = median[:len(matching_indices)]
-            mae = mean_absolute_error(real_future_data[matching_indices], predicted_data)
-            rmse = np.sqrt(mean_squared_error(real_future_data[matching_indices], predicted_data))
-            mape = np.mean(np.abs((real_future_data[matching_indices] - predicted_data) / real_future_data[matching_indices])) * 100
-            plt.title(f"Predicción del Precio de {ticker}\nMAE: {mae:.2f} | RMSE: {rmse:.2f} | MAPE: {mape:.2f}%",
-                      fontsize=14, pad=20)
-        else:
-            plt.title(f"Predicción Futura del Precio de {ticker}",
-                      fontsize=14, pad=20)
-
-        plt.legend(loc="upper left", fontsize=12)
-        plt.xlabel("Fecha", fontsize=12)
-        plt.ylabel("Precio", fontsize=12)
-
-        # Enable daily reference lines on the chart
-        plt.grid(True, which='both', axis='x', linestyle='--', linewidth=0.5)
-
-        # Format the x axis to show the dates correctly and add daily reference lines
-        ax = plt.gca()
-        locator = mdates.DayLocator()
-        formatter = mdates.DateFormatter('%Y-%m-%d')
-        ax.xaxis.set_major_locator(locator)
-        ax.xaxis.set_major_formatter(formatter)
-
-        # Rotate the date labels
-        plt.setp(ax.get_xticklabels(), rotation=45, ha='right')
-
-        plt.tight_layout()
-
-        # Create a temporary file for the CSV
-        temp_csv = tempfile.NamedTemporaryFile(delete=False, suffix='.csv')
-        prediction_df = pd.DataFrame({
-            'Date': prediction_dates,
-            'Predicted_Price': median,
-            'Lower_Bound': low,
-            'Upper_Bound': high
-        })
-
-        # Add the real data if it is available and matches the prediction dates
-        if overlap_end_index <= total_points:
-            real_future_dates = df['Date'][train_data_points:overlap_end_index]
-            real_future_data = df[f'{ticker}_Close'][train_data_points:overlap_end_index].values
-            matching_dates = real_future_dates[real_future_dates.isin(prediction_dates)]
-            prediction_df = prediction_df[prediction_df['Date'].isin(matching_dates)]
-            prediction_df['Real_Price'] = real_future_data[:len(prediction_df)]
-
-        # Save the DataFrame to the temporary file
-        prediction_df.to_csv(temp_csv.name, index=False)
-        temp_csv.close()
-
-        # Return the plot and the path to the CSV file
-        return plt, temp_csv.name
-
-    except Exception as e:
-        print(f"Error: {str(e)}")
-        raise gr.Error(f"Error al procesar {ticker}: {str(e)}")
+# The rest of the code stays the same up to the Gradio interface section
 
-# Create the Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Aplicación de Predicción de Precios de Acciones")
 
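For reference, the low/median/high curves in the removed plotting code are per-day sample quantiles of the Chronos forecast: `pipeline.predict` returns sampled future trajectories, and `np.quantile(forecast[0].numpy(), [0.01, 0.5, 0.99], axis=0)` collapses the sample axis into a 1st-percentile, median, and 99th-percentile value for each predicted day, so the shaded "Intervalo de Confianza" is a 98% sample interval. A minimal sketch of just that step, using synthetic samples in place of the model output and assuming the usual [num_samples, prediction_length] shape for `forecast[0]`:

    import numpy as np

    # Stand-in for forecast[0].numpy(): 20 sampled paths over 7 predicted days
    rng = np.random.default_rng(0)
    samples = rng.normal(loc=100.0, scale=2.0, size=(20, 7))

    # Per-day quantiles across the sample axis -> one value per predicted day
    low, median, high = np.quantile(samples, [0.01, 0.5, 0.99], axis=0)
    print(low.shape, median.shape, high.shape)  # (7,) (7,) (7,)
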
@@ -170,6 +24,7 @@ with gr.Blocks() as demo:
         with gr.Column(scale=1):
             ticker = gr.Dropdown(
                 choices=get_popular_tickers(),
+                value="AAPL",  # Added default value
                 label="Selecciona el Símbolo de la Acción"
             )
             train_data_points = gr.Slider(
@@ -193,12 +48,18 @@ with gr.Blocks() as demo:
             download_btn = gr.File(label="Descargar Predicciones")
 
     def update_train_data_points(ticker):
-        (previous body, old lines 196-201, not rendered in the extracted diff)
+        try:
+            stock = yf.Ticker(ticker)
+            hist = stock.history(period="max")
+            total_points = len(hist)
+            return gr.Slider.update(
+                maximum=total_points,
+                value=min(1000, total_points),
+                visible=True
+            )
+        except Exception as e:
+            print(f"Error updating slider: {str(e)}")
+            return gr.Slider.update(visible=True)  # Keep the slider visible in case of error
 
     ticker.change(
         fn=update_train_data_points,
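A note on the added callback: `gr.Slider.update(...)` is the Gradio 3.x idiom; in Gradio 4.x the per-component `.update()` helpers were removed, and an event handler returns `gr.update(...)` (or a new component instance) instead. A rough sketch of the same callback written for Gradio 4, assuming its output is wired to the `train_data_points` slider via the `ticker.change` call in the hunk above:

    import gradio as gr
    import yfinance as yf

    def update_train_data_points(ticker):
        try:
            hist = yf.Ticker(ticker).history(period="max")
            total_points = len(hist)
            # gr.update(...) replaces the removed gr.Slider.update(...) classmethod
            return gr.update(maximum=total_points, value=min(1000, total_points), visible=True)
        except Exception as e:
            print(f"Error updating slider: {e}")
            return gr.update(visible=True)  # keep the slider visible if the lookup fails

If the Space pins gradio 3.x, the committed `gr.Slider.update(...)` form works as written.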
@@ -212,4 +73,4 @@ with gr.Blocks() as demo:
         outputs=[plot_output, download_btn]
     )
 
-demo.launch()
+demo.launch()