Update src/streamlit_app.py
src/streamlit_app.py  CHANGED  (+35 -1)
@@ -1,3 +1,5 @@
+'''
+
 import altair as alt
 import numpy as np
 import pandas as pd
@@ -37,4 +39,36 @@ st.altair_chart(alt.Chart(df, height=700, width=700)
         y=alt.Y("y", axis=None),
         color=alt.Color("idx", legend=None, scale=alt.Scale()),
         size=alt.Size("rand", legend=None, scale=alt.Scale(range=[1, 150])),
-    ))
+    ))
+
+'''
+
+import streamlit as st
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+import torch
+import torch.nn.functional as F
+
+st.set_page_config(page_title="FinBERT Sentiment", layout="centered")
+st.title("💰 FinBERT: Financial Sentiment Analysis")
+st.markdown("Model: `yiyanghkust/finbert-tone`, trained on financial text")
+
+@st.cache_resource
+def load_model():
+    tokenizer = AutoTokenizer.from_pretrained("yiyanghkust/finbert-tone")
+    model = AutoModelForSequenceClassification.from_pretrained("yiyanghkust/finbert-tone")
+    return tokenizer, model
+
+tokenizer, model = load_model()
+
+# Text input
+text = st.text_area("Enter a financial news story, headline, or report:", height=150)
+
+if st.button("Analyze sentiment") and text.strip():
+    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
+    with torch.no_grad():
+        outputs = model(**inputs)
+    probs = F.softmax(outputs.logits, dim=1).squeeze()
+
+    labels = ["📉 Negative", "😐 Neutral", "📈 Positive"]
+    for label, prob in zip(labels, probs):
+        st.write(f"**{label}:** {prob.item():.3f}")