import streamlit as st
import pandas as pd
import plotly.express as px
import datetime
from Scheduler.mailer_quant import Mailer
from sqlalchemy import create_engine
import psycopg2
import graphviz
import plotly.graph_objects as go
from logs_portal import log
import io
import boto3
from Data.credentials import credentials_s3 as creds3
from Data.credentials import credentials_postgresql as credpost
from st_aggrid import GridOptionsBuilder, AgGrid, GridUpdateMode, DataReturnMode, JsCode
def save_s3(key, secret_key, bucket, df, path):
    # Serialize the DataFrame to an in-memory Excel file and upload it to S3.
    with io.BytesIO() as output:
        with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
            df.to_excel(writer, index=False)
        data = output.getvalue()
    s3 = boto3.resource('s3', aws_access_key_id=key,
                        aws_secret_access_key=secret_key)
    s3.Bucket(bucket).put_object(Key=path, Body=data)
def read_excel_s3(key, secret_key, bucket, path):
    s3_client = boto3.client('s3', aws_access_key_id=key,
                             aws_secret_access_key=secret_key)
    response = s3_client.get_object(Bucket=bucket, Key=path)
    data = response["Body"].read()
    df = pd.read_excel(io.BytesIO(data), engine='openpyxl')
    return df
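
# Illustrative round trip for the two S3 helpers above (not executed by the
# app; the bucket and path values are the ones this module already uses):
#   df = read_excel_s3(creds3["S3_KEY_ID"], creds3["S3_SECRET_KEY"],
#                      creds3["S3_BUCKET"], "Analistas Empresa.xlsx")
#   save_s3(creds3["S3_KEY_ID"], creds3["S3_SECRET_KEY"],
#           creds3["S3_BUCKET"], df, "Analistas Empresa.xlsx")
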
def display_table(df: pd.DataFrame):
    # Configure AgGrid options
    gb = GridOptionsBuilder.from_dataframe(df)
    gb.configure_selection(selection_mode="single", use_checkbox=True)
    return AgGrid(
        df, gridOptions=gb.build(),
        update_mode=GridUpdateMode.SELECTION_CHANGED,
        enable_enterprise_modules=True)
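
# The AgGrid response returned by display_table is consumed elsewhere in this
# module via response["selected_rows"], e.g. in ingresar_nota_ex:
#   notas_table = display_table(data_fil[["Empresa", "Pais", "LV1", "Nota"]])
#   if len(notas_table["selected_rows"]) > 0:
#       emp_name = notas_table["selected_rows"][0]["Empresa"]
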
def style_table():
    # Inject CSS overrides for Streamlit tables (hover colour, header colours).
    style_table = """
    <style>
    tbody tr:hover {
        color: #BB1114;
    }
    thead {
        background-color: #BB1114;
        color: #E8E8E8;
    }
    tbody tr:nth-child(odd) {
        background-color: #fff;
    }
    /* tbody tr:nth-child(even) {
        background-color: #eee;
    } */
    stTable {
        border-collapse: collapse;
        margin: 25px 0;
        font-size: 0.9em;
        min-width: 400px;
        box-shadow: 0 0 20px rgba(0, 0, 0, 0.15);
    }
    </style>
    """
    st.markdown(style_table, unsafe_allow_html=True)
def mostrar_tabla(info_fil, placeholder, select):
    info_fil2 = info_fil.copy()
    info_fil2 = info_fil2[select]
    placeholder.table(info_fil2)
def leer_notas():
    # Load analyst notes from the notas_analistas table and index them by date.
    url = credpost["POSTGRESQL"]
    engine = create_engine(url, echo=False)
    data = pd.read_sql_query("SELECT * FROM notas_analistas", con=engine)
    data.columns = ["Analista", "Comentario", "Date", "Empresa", "ID_Quant",
                    "LV1", "Nota", "Pais", "Ticker Bloomberg",
                    "Tipo de Comentario"]
    data.index = pd.to_datetime(data['Date']).dt.strftime('%d/%m/%Y')
    data.index.name = "Fecha"
    data = data.sort_index(ascending=False)
    return data
def ver_nota():
    select = ["Analista", "Tipo de Comentario", "Empresa", "Pais", "Nota",
              "Comentario"]
    key = creds3["S3_KEY_ID"]
    secret_key = creds3["S3_SECRET_KEY"]
    bucket = creds3["S3_BUCKET"]
    path = "Analistas Empresa.xlsx"
    style_table()
    col1, col2, col3, col4 = st.columns(4)
    data = leer_notas()
    select = ["Analista", "Pais", "LV1", "Empresa", "Nota", "Comentario"]
    data3 = read_excel_s3(key, secret_key, bucket, path)
    data4 = data3.copy()
    # Cascading filters: each selectbox narrows both the analyst mapping
    # (data4) and the notes (data).
    autores = sorted(set(data4["Analista"].dropna()))
    Autor = col1.selectbox("Analista", ["-"] + autores)
    if Autor != "-":
        data4 = data4[data4["Analista"] == Autor]
        data = data[data["Analista"] == Autor]
    pais = sorted(set(data4["Pais"].dropna()))
    Pais = col2.selectbox("Pais", ["-"] + pais)
    if Pais != "-":
        data4 = data4[data4["Pais"] == Pais]
        data = data[data["Pais"] == Pais]
    industria = sorted(set(data4["LV1"].dropna()))
    Industria = col3.selectbox("Industria", ["-"] + industria)
    if Industria != "-":
        data4 = data4[data4["LV1"] == Industria]
        data = data[data["LV1"] == Industria]
    empresa = sorted(set(data4["Empresa"].dropna()))
    Empresa = col4.selectbox("Empresa", ["-"] + empresa)
    if Empresa != "-":
        data4 = data4[data4["Empresa"] == Empresa]
        data = data[data["Empresa"] == Empresa]
    info_fil = data
    Ordenar_por = col1.selectbox("Ordenar por", ["Date", "Analista", "Pais",
                                                 "LV1", "Empresa"])
    mayor = col2.selectbox("Asc o desc", ["Descendiente", "Ascendiente"])
    fec_i = col3.date_input('Fecha de inicio', datetime.date(2021, 7, 1))
    fec_f = col4.date_input('Fecha final')
    placeholder = st.empty()
    if mayor != "-" and Ordenar_por != "-":
        var = Ordenar_por
        if mayor == "Ascendiente":
            info_fil = info_fil.sort_values(var, ascending=True)
        else:
            info_fil = info_fil.sort_values(var, ascending=False)
    info_fil = info_fil[info_fil["Date"].dt.date >= fec_i]
    info_fil = info_fil[info_fil["Date"].dt.date <= fec_f]
    mostrar_tabla(info_fil, placeholder, select)
def read_mapeo_analistas():
    key = creds3["S3_KEY_ID"]
    secret_key = creds3["S3_SECRET_KEY"]
    bucket = creds3["S3_BUCKET"]
    path = "Analistas Empresa.xlsx"
    return read_excel_s3(key, secret_key, bucket, path)
def read_company_db():
    company_db = pd.read_excel("Data/Company_Base_Definitivo.xlsx",
                               sheet_name="Compilado",
                               engine="openpyxl")
    return company_db
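
# Columns of Company_Base_Definitivo.xlsx ("Compilado" sheet) that the rest of
# this module relies on: Short_Name, Portfolio_Country, LV1, ID_Quant and
# 'Ticker Bloomberg' (see ingresar_nota / ingresar_nota_ex).
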
def aggrid_notas(data):
    gb = GridOptionsBuilder.from_dataframe(data)
    # make columns editable
    gb.configure_columns(["Nota", "Comentario"], editable=True)
    gb.configure_column('Nota',
                        cellEditor='agRichSelectCellEditor',
                        cellEditorParams={'values': [1, 2, 3, 4, 5]})
    # Flash a cell whenever its value is edited so the change is visible.
    js = JsCode("""
    function(e) {
        let api = e.api;
        let rowIndex = e.rowIndex;
        let col = e.column.colId;
        let rowNode = api.getDisplayedRowAtIndex(rowIndex);
        api.flashCells({
            rowNodes: [rowNode],
            columns: [col],
            flashDelay: 10000000000
        });
    };
    """)
    gb.configure_grid_options(onCellValueChanged=js)
    grid_options = gb.build()
    return AgGrid(data, gridOptions=grid_options, key='grid1',
                  allow_unsafe_jscode=True,
                  reload_data=False,
                  fit_columns_on_grid_load=False,
                  enable_enterprise_modules=True)
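
# The grid response exposes the (possibly edited) rows as a DataFrame under
# response["data"]; ingresar_nota diffs it against the original frame to find
# the rows whose Nota or Comentario changed:
#   notas = aggrid_notas(data_fil)
#   notas_f = notas["data"]
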
def ingresar_nota():
    try:
        notas_df = leer_notas()
        companydb_df = read_company_db()
        companies_assigned_df = read_mapeo_analistas()
        notas_df["Comentario"] = notas_df["Comentario"].fillna(" ")
        analista = st.session_state["name"]
        empresa_analista = sorted(set(
            companies_assigned_df[companies_assigned_df["Analista"]
                                  == analista]["Empresa"]))
        companies_fil = companies_assigned_df[
            companies_assigned_df["Analista"] == analista]
        data_fil_a = notas_df[notas_df["Analista"] == analista].drop_duplicates(
            "Empresa")
        data_fil = data_fil_a[["Empresa", "Nota", "Comentario"]]
        data_fil = companies_fil.merge(data_fil,
                                       on="Empresa",
                                       how='left').reset_index()
        data_fil["Nota"] = data_fil["Nota"].fillna(0)
        data_fil["Comentario"] = data_fil["Comentario"].fillna(" ")
        data_fil = data_fil[["Empresa", "Pais", "LV1", "Nota",
                             "Comentario"]].sort_values("LV1")
        # Editable grid: compare the returned data against the original to
        # detect which rows the analyst actually changed.
        notas = aggrid_notas(data_fil)
        notas_f = notas["data"]
        update_notas = []
        for emp in notas_f.Empresa:
            new_df = notas_f[notas_f.Empresa == emp]
            old_df = data_fil[data_fil.Empresa == emp]
            new_nota = new_df.iloc[0].Nota
            new_comentario = new_df.iloc[0].Comentario
            old_nota = old_df.iloc[0].Nota
            old_comentario = old_df.iloc[0].Comentario
            if old_nota != new_nota or old_comentario != new_comentario:
                update_notas.append(notas_f[notas_f.Empresa == emp])
        update_final = pd.concat(update_notas)
        st.write(update_final)
        submitted_2 = st.button("Update Notas")
        if submitted_2:
            today = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
            for emp in update_final.Empresa:
                df_emp = update_final[update_final.Empresa == emp]
                empresa_df = companydb_df[companydb_df['Short_Name'] == emp].iloc[0]
                pais = empresa_df['Portfolio_Country']
                industria = empresa_df["LV1"]
                id_quant = empresa_df["ID_Quant"]
                tbloom = empresa_df['Ticker Bloomberg']
                comentario = df_emp.iloc[0]["Comentario"]
                nota = df_emp.iloc[0]["Nota"]
                if "'" in emp:
                    emp = emp.replace("'", "")
                varlist = [analista, comentario, today, emp, id_quant,
                           industria, nota, pais, tbloom]
                # try:
                url = credpost["POSTGRESQL"]
                conn = psycopg2.connect(url, sslmode='require')
                cur = conn.cursor()
                # Parameterized insert so quotes in text fields are escaped.
                cur.execute(
                    "INSERT INTO notas_test (analista, comentario, date_nota, "
                    "empresa, id_quant, lv1, nota, pais, ticker_bloomberg) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);",
                    tuple(varlist))
                conn.commit()
                cur.close()
                conn.close()
                st.info("Nota ingresada exitosamente")
                if emp != "-":
                    asunto = "Actualizacion nota " + emp + " - " + analista
                    mensaje = (analista + " ha actualizado la nota de la empresa "
                               + emp + " a " + str(nota))
                else:
                    asunto = "Actualizacion nota " + industria + " - " + analista
                    mensaje = (analista + " ha actualizado la nota de la industria "
                               + industria + " a " + str(nota))
                destinatario = st.session_state['mail']
                mail = Mailer(asunto, mensaje, "", "bullm@larrainvial.com")
                mail.send_message([  # destinatario,
                    "bullm@larrainvial.com,",
                    "benjamin.ull.m@gmail.com"])
                # except:
                #     st.error("Problemas al ingresar la nota")
                #     asunto = "Actualizacion nota " + emp + " - " + analista
                #     mensaje = analista + " ha tenido problemas con la nota de " + emp + " a " + str(nota)
                #     mail = Mailer(asunto, mensaje, "", "bullm@larrainvial.com")
                #     mail.send_message(["bullm@larrainvial.com,",
                #                        "benjamin.ull.m@gmail.com"])
            st.experimental_memo.clear()
    except Exception as exc:
        st.write(exc)
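
# Note: ingresar_nota above inserts into the notas_test table, while
# ingresar_nota_ex below writes to notas_analistas (the table leer_notas reads).
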
def ingresar_nota_ex():
    Analistas = {
        "fsutter": "Florencia Sutter",
        "alehmann": "Alejandro Lehmann",
        "bcosoi": "Benjamín Cosoi",
        "chinojosa": "Carlos Hinojosa",
        "gcatalan": "Gustavo Catalan",
        "bull": "Benjamin Ull",
        "ftaverne": "Francisca Taverne"
    }
    notas_df = leer_notas()
    companydb_df = read_company_db()
    companies_assigned_df = read_mapeo_analistas()
    notas_df["Comentario"] = notas_df["Comentario"].fillna(" ")
    # Map the logged-in username to a display name when it is a known analyst.
    if st.session_state.key in list(Analistas.keys()):
        analista = Analistas[st.session_state.key]
    else:
        analista = st.session_state.key
    data_analista = companies_assigned_df[
        companies_assigned_df["Analista"] == analista]
    industrias_analista = sorted(set(data_analista["LV1"]))
    empresa_analista = sorted(set(data_analista["Empresa"]))
    Countries = sorted(set(data_analista["Pais"]))
    LV1s = sorted(set(data_analista["LV1"]))
    industrias = []
    for c in Countries:
        for l in LV1s:
            industrias.append(c + " - " + l)
    col1, col2 = st.columns(2)
    placeholder = col2.empty()
    companies_fil = companies_assigned_df[
        companies_assigned_df["Analista"] == analista]
    data_fil_a = notas_df[notas_df["Analista"] == analista].drop_duplicates(
        "Empresa")
    data_fil = data_fil_a[["Empresa", "Nota"]]
    data_fil = companies_fil.merge(data_fil,
                                   on="Empresa",
                                   how='left').fillna(0)
    with col1:
        notas_df2 = notas_df
        # Share (in %) of companies with a non-zero note, overall and for this
        # analyst; the metric delta shows the gap between the two.
        notas_unicas = notas_df.drop_duplicates("Empresa")
        porc_total = (notas_unicas["Nota"] > 0).sum() / len(notas_unicas) * 100
        porc_emp_notas = (data_fil["Nota"] > 0).sum() / len(data_fil) * 100
        delta_per = round(porc_total - porc_emp_notas, 2)
        st.metric("% de empresas con nota", round(porc_emp_notas, 2), delta_per)
        notas_table = display_table(data_fil[["Empresa", "Pais", "LV1",
                                              "Nota"]].sort_values("LV1"))
        if len(notas_table["selected_rows"]) > 0:
            emp_name = notas_table["selected_rows"][0]["Empresa"]
            st.subheader("Comentario")
            data_emp_df = data_fil_a[data_fil_a["Empresa"] == emp_name]
            if len(data_emp_df) > 0:
                st.write(data_emp_df.iloc[0]["Comentario"])
                id_quant = data_emp_df.iloc[0]["ID_Quant"]
                country = data_emp_df.iloc[0]["Pais"]
    with placeholder.form("my_form2", True):
        # col1, col2, col3, col4 = st.columns((3, 8, 2, 1.5))
        Empresas = ["-"] + sorted(list(empresa_analista) + industrias)
        st.markdown(
            '<p style="font-size:12px; padding-left:0px; margin-bottom:0px;">Analista</p>',
            unsafe_allow_html=True)
        st.markdown('<h3 style="padding-left:0px; margin-bottom:0px;">' + analista + "</h3>",
                    unsafe_allow_html=True)
        # A grid selection pre-fills the company; otherwise offer the full list.
        if len(notas_table["selected_rows"]) < 1:
            empresa = st.selectbox('Empresa', Empresas)
        else:
            empresa = notas_table["selected_rows"][0]["Empresa"]
            st.subheader(empresa)
        tipo_comentario = "Nota"
        nota = st.selectbox("Nota", [0, 1, 2, 3, 4, 5])
        comentario = st.text_area('Comentario')
        submitted_2 = st.form_submit_button("Publicar ")
    today = datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
    if submitted_2:
        st.experimental_memo.clear()
        empresa_df = companydb_df[companydb_df['Short_Name'] == empresa].iloc[0]
        pais = empresa_df['Portfolio_Country']
        industria = empresa_df["LV1"]
        id_quant = empresa_df["ID_Quant"]
        tbloom = empresa_df['Ticker Bloomberg']
        if "'" in empresa:
            empresa = empresa.replace("'", "")
        varlist = [analista, comentario, today, empresa, id_quant,
                   industria, nota, pais, tbloom]
        try:
            url = credpost["POSTGRESQL"]
            conn = psycopg2.connect(url, sslmode='require')
            cur = conn.cursor()
            # Parameterized insert so quotes in text fields are escaped.
            cur.execute(
                "INSERT INTO notas_analistas (analista, comentario, date_nota, "
                "empresa, id_quant, lv1, nota, pais, ticker_bloomberg) "
                "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);",
                tuple(varlist))
            conn.commit()
            cur.close()
            conn.close()
            st.info("Nota ingresada exitosamente")
            if empresa != "-":
                asunto = "Actualizacion nota " + empresa + " - " + analista
                mensaje = (analista + " ha actualizado la nota de la empresa "
                           + empresa + " a " + str(nota))
            else:
                asunto = "Actualizacion nota " + industria + " - " + analista
                mensaje = (analista + " ha actualizado la nota de la industria "
                           + industria + " a " + str(nota))
            destinatario = st.session_state['mail']
            mail = Mailer(asunto, mensaje, "", "bullm@larrainvial.com")
            mail.send_message([destinatario,
                               "bullm@larrainvial.com,",
                               "benjamin.ull.m@gmail.com"])
        except Exception:
            st.error("Problemas al ingresar la nota")
            asunto = "Actualizacion nota " + empresa + " - " + analista
            mensaje = (analista + " ha tenido problemas con la nota de "
                       + empresa + " a " + str(nota))
            mail = Mailer(asunto, mensaje, "", "bullm@larrainvial.com")
            mail.send_message(["bullm@larrainvial.com,",
                               "benjamin.ull.m@gmail.com"])
        notas_df = leer_notas()
    # st.write(data_fil.columns)
    # st.table(data_fil[["Analista","Empresa", "LV1","Nota", "Comentario"]])
def estadisticas():
    data = leer_notas()
    st.subheader("Distribución de notas")
    data = data[data["Nota"] != 0]
    col1, col2 = st.columns((1.681, 1))
    place = col1.empty()
    val = col2.selectbox("Seleccione un analista",
                         list(set(data["Analista"].dropna())))
    data_fil = data[data["Analista"] == val]
    data_fil["count"] = 1
    data_fil2 = data_fil.groupby(by=["Nota"],
                                 as_index=False).agg({"count": "sum"})
    data_fil3 = data.sort_values("Date", ascending=False)
    data_fil3 = data_fil3[data_fil3["ID_Quant"] == 0]
    data_fil4 = data_fil3.groupby(by=["LV1", "Pais", "Empresa", "Date"],
                                  as_index=False).agg({"Nota": "mean"})
    data_fil3 = data_fil3.groupby(by=["LV1"],
                                  as_index=False).agg({"Nota": "mean"})
    l = []
    for i in range(len(data_fil2)):
        l.append(str(round(data_fil2.iloc[i]["Nota"])))
    data_fil2["Nota "] = l
    fig = px.bar(data_fil2, x="Nota ", y="count",
                 color_discrete_sequence=['indianred'])
    fig.update_layout(bargap=0.2)
    place.plotly_chart(fig, use_container_width=True)
    col2.header("Media = " + str(round(sum(data_fil2["Nota"] * data_fil2["count"])
                                       / sum(data_fil2["count"]), 1)))
    data["Datetime"] = pd.to_datetime(data["Date"], format='%Y-%m-%d %H:%M:%S',
                                      errors='ignore')
    data_fil4["Datetime"] = pd.to_datetime(data_fil4["Date"],
                                           format='%Y-%m-%d %H:%M:%S',
                                           errors='ignore')
| st.subheader("Evolución por empresa") | |
| col1, col2 = st.columns((2, 1)) | |
| placeholder = col1.empty() | |
| pais = col2.selectbox("Seleccione un pais", | |
| ["-"] + sorted(list(set(data["Pais"].dropna())))) | |
| if pais != "-": | |
| data2 = data[data["Pais"] == pais] | |
| else: | |
| data2 = data | |
| industria = col2.selectbox("Seleccione una industria", | |
| ["-"] + sorted(list(set(data2["LV1"].dropna())))) | |
| if industria != "-": | |
| data2 = data2[data2["LV1"] == industria] | |
| else: | |
| data2=data2 | |
| empr = col2.selectbox("Seleccione una empresa", | |
| ["-"] + sorted(list(set(data2["Empresa"].dropna())))) | |
| if empr != "-": | |
| notas_empr = data[data["Empresa"] == empr] | |
| elif empr == "-" and pais == "-" and industria != "-": | |
| notas_empr = data_fil4[data_fil4["LV1"]==industria] | |
| elif empr == "-" and pais != "-" and industria == "-": | |
| notas_empr = data_fil4[data_fil4["Pais"]==pais] | |
| else: | |
| notas_empr = data2 | |
| date_range = pd.date_range(notas_empr['Datetime'].min() - datetime.timedelta(days=4), | |
| datetime.datetime.today() + datetime.timedelta(days=1)) | |
| hist_notas = pd.DataFrame(index=date_range) | |
| if empr == "-" and pais == "-" and industria == "-": | |
| placeholder.empty() | |
| else: | |
| for empresa in list(set(notas_empr["Empresa"])): | |
| l = [] | |
| a = 0 | |
| for i in list(date_range): | |
| data3 = notas_empr[notas_empr["Empresa"] == empresa] | |
| data3 = data3[data3["Datetime"].dt.date == i] | |
| if len(data3) == 0: | |
| l.append(a) | |
| else: | |
| a = data3.iloc[0]["Nota"] | |
| l.append(a) | |
| hist_notas[empresa] = l | |
| hist_notas["Date"] = list(hist_notas.index) | |
| fig2 = px.line(hist_notas, x="Date", y=hist_notas.columns) | |
| fig2.update_traces(textposition="bottom right") | |
| placeholder.plotly_chart(fig2, use_container_width=True) | |
| col2.header(" ") | |
| col2.header(" ") | |
| col1, col2 = st.columns((2, 1)) | |
| col1.subheader("Promedio por industria") | |
| add_pais = col2.selectbox("Añadir Pais", ["-"] + list(set(data_fil4["Pais"]))) | |
| if add_pais != "-": | |
| data_fil4 = data_fil4[data_fil4["Pais"] == add_pais] | |
| data_fil4 = data_fil4.sort_values(by = "Datetime", ascending = False) | |
| data_fil4 = data_fil4.drop_duplicates("LV1") | |
| fig3 = go.Figure(data=[ | |
| go.Bar(name='General', x=data_fil3["LV1"], y=data_fil3["Nota"]), | |
| go.Bar(name=add_pais, x=data_fil4["LV1"], y=data_fil4["Nota"]) | |
| ]) | |
| fig3.update_yaxes(range=[min(min(data_fil3['Nota']), | |
| min(data_fil4['Nota']))/1.1, | |
| max(max(data_fil3['Nota']), | |
| max(data_fil4['Nota']))*1.1]) | |
| else: | |
| fig3 = go.Figure(data=[ | |
| go.Bar(name='General', x=data_fil3["LV1"], y=data_fil3["Nota"]) | |
| ]) | |
| fig3.update_yaxes(range=[min(data_fil3['Nota'])/1.1, | |
| max(data_fil3['Nota'])*1.1]) | |
| data_fil3 = data_fil3.sort_values("Nota") | |
| fig3.update_layout(barmode='group', | |
| xaxis={'categoryorder': 'total descending'}) | |
| st.plotly_chart(fig3, use_container_width=True) | |
def asignar_analista():
    key = creds3["S3_KEY_ID"]
    secret_key = creds3["S3_SECRET_KEY"]
    bucket = creds3["S3_BUCKET"]
    path = "Analistas Empresa.xlsx"
    analista_emp = read_excel_s3(key, secret_key, bucket, path)
    # analista_emp = pd.read_excel("Data/Analistas Empresa.xlsx", engine='openpyxl')
    Analistas = {
        "fsutter": "Florencia Sutter",
        "alehmann": "Alejandro Lehmann",
        "bcosoi": "Benjamín Cosoi",
        "chinojosa": "Carlos Hinojosa",
        "gcatalan": "Gustavo Catalan",
        "bull": "Benjamin Ull",
        "ftaverne": "Francisca Taverne"
    }
    analista_emp2 = analista_emp[["ID_Quant", "Analista", "Empresa", "LV1",
                                  "Pais"]].copy()
    credenciales = pd.read_csv("Data/Credenciales_h.csv",
                               names=['Usuario', 'Password', 'Area',
                                      'Cargo', 'Mail', 'Nombre'])
    analistas = credenciales[credenciales["Cargo"] == "Investment Analyst"]
    col1, col2, col3 = st.columns(3)
    analista = col1.selectbox("Analista",
                              sorted(set(Analistas.values())))
    industria = col2.selectbox("Industria",
                               sorted(set(analista_emp["LV1"])))
    col3.title(" ")
    val = col3.checkbox("Seleccionar todos", value=True)
    dicc = {}
    with st.form("form"):
        col1, col2 = st.columns(2)
        col1.write("Asignar a: ")
        col2.write("Industria: ")
        col1.subheader(analista)
        col2.subheader(industria)
        col1.header(" ")
        col2.header(" ")
        col1, col2 = st.columns(2)
        # One checkbox per company in the selected industry, split across two columns.
        empresas = analista_emp[analista_emp["LV1"] == industria]["Empresa"]
        i = 0
        for empresa in empresas:
            if i % 2 == 0:
                dicc[empresa] = col1.checkbox(empresa, value=val)
            else:
                dicc[empresa] = col2.checkbox(empresa, value=val)
            i += 1
        submit = st.form_submit_button("Asignar")
    if submit:
        for empresa in dicc.keys():
            if dicc[empresa]:
                cambio = analista_emp2[analista_emp2["Empresa"] == empresa]
                analista_emp2.loc[analista_emp2.Empresa == empresa, 'Analista'] = analista
        save_s3(key, secret_key, bucket, analista_emp2, path)
    style_table()
    data_f = analista_emp2[["Analista", "Pais", "LV1", "Empresa"]]
    data_f = data_f[data_f["Analista"] == analista]
    graph = graphviz.Digraph()
    for industry in list(set(data_f["LV1"])):
        graph.edge(analista, industry)
        d_ind = data_f[data_f["LV1"] == industry]
    st.subheader("Mapeo Analista - Industrias")
    st.graphviz_chart(graph)
    d_ind = analista_emp2[["Analista", "Pais", "LV1", "Empresa"]]
    d_ind = d_ind[d_ind["LV1"] == industria]
    st.subheader("Analistas asignados a la industria " + industria)
    st.table(d_ind)
def save_password():
    key = creds3["S3_KEY_ID"]
    secret_key = creds3["S3_SECRET_KEY"]
    bucket = creds3["S3_BUCKET"]
    path = 'Claves.xlsx'
    claves = read_excel_s3(key, secret_key, bucket, path)
    col1, col2 = st.columns((1, 1.5))
    with col1.form('Nueva clave'):
        new_user = st.text_input("Ingresar usuario")
        password = st.text_input("Ingresar clave")
        plataforma = st.text_input("Plataforma")
        submitted = st.form_submit_button('Ingresar')
    if submitted:
        # pd.concat replaces the deprecated DataFrame.append with the same result.
        claves = pd.concat([claves,
                            pd.DataFrame([{"Clave": password,
                                           "Usuario": new_user,
                                           "Plataforma": plataforma}])],
                           ignore_index=True)
        save_s3(key, secret_key, bucket, claves, path)
    style_table()
    claves.index = claves['Plataforma']
    col2.table(claves[['Usuario', 'Clave']])