Spaces:
Runtime error
Runtime error
Update pages/Comparision.py
Browse files- pages/Comparision.py +50 -62
pages/Comparision.py
CHANGED
|
@@ -5,53 +5,53 @@ from transformers import pipeline
|
|
| 5 |
from rake_nltk import Rake
|
| 6 |
from nltk.corpus import stopwords
|
| 7 |
from fuzzywuzzy import fuzz
|
| 8 |
-
from openai import OpenAI
|
| 9 |
-
import os
|
| 10 |
-
from dotenv import load_dotenv
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
|
|
|
|
| 14 |
|
| 15 |
-
|
| 16 |
-
client = OpenAI(
|
| 17 |
-
base_url="https://api-inference.huggingface.co/v1",
|
| 18 |
-
api_key=os.environ.get('HFSecret') # Replace with your token
|
| 19 |
-
)
|
| 20 |
-
repo_id = "meta-llama/Meta-Llama-3-8B-Instruct"
|
| 21 |
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
# Define the options for the dropdown menu, selecting a remote txt file already created to analyze the text
|
| 25 |
-
options = ['None', 'Apprecitation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']
|
| 26 |
|
| 27 |
# Create a dropdown menu to select options
|
| 28 |
selected_option = st.selectbox("Select a preset option", options)
|
| 29 |
|
| 30 |
# Define URLs for different options
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
"Twitter Example 2": "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
|
| 38 |
-
}
|
| 39 |
|
| 40 |
# Function to fetch text content based on selected option
|
| 41 |
def fetch_text_content(selected_option):
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
jd = fetch_text_content(selected_option)
|
| 46 |
|
| 47 |
-
#
|
| 48 |
-
nltk.download('punkt')
|
| 49 |
-
nltk.download('stopwords')
|
| 50 |
-
|
| 51 |
-
# Initialize transformer sentiment analysis pipeline
|
| 52 |
pipe_sent = pipeline('sentiment-analysis')
|
|
|
|
|
|
|
| 53 |
|
| 54 |
-
# Function to extract keywords
|
| 55 |
def extract_keywords(text):
|
| 56 |
r = Rake()
|
| 57 |
r.extract_keywords_from_text(text)
|
|
@@ -75,40 +75,28 @@ def extract_keywords(text):
|
|
| 75 |
seen_phrases.add(phrase)
|
| 76 |
return unique_keywords[:10]
|
| 77 |
|
| 78 |
-
#
|
| 79 |
text = st.text_area('Enter the text to analyze', jd)
|
| 80 |
|
|
|
|
| 81 |
if st.button("Start Analysis"):
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
# Transformers (Column 1)
|
| 85 |
-
with col1:
|
| 86 |
-
st.header("Transformers Model")
|
| 87 |
-
with st.spinner("Analyzing with Transformers..."):
|
| 88 |
out_sentiment = pipe_sent(text)
|
| 89 |
sentiment_score = out_sentiment[0]['score']
|
| 90 |
sentiment_label = out_sentiment[0]['label']
|
| 91 |
sentiment_emoji = 'π' if sentiment_label == 'POSITIVE' else 'π'
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
st.
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
|
| 106 |
-
temperature=0.5,
|
| 107 |
-
stream=True,
|
| 108 |
-
max_tokens=3000
|
| 109 |
-
)
|
| 110 |
-
response = ''.join([chunk['choices'][0]['text'] for chunk in stream])
|
| 111 |
-
st.write(response)
|
| 112 |
-
except Exception as e:
|
| 113 |
-
st.error("Error occurred while fetching response from Llama 3")
|
| 114 |
-
|
|
|
|
| 5 |
from rake_nltk import Rake
|
| 6 |
from nltk.corpus import stopwords
|
| 7 |
from fuzzywuzzy import fuzz
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
+
# Ensure NLTK resources are downloaded: 'punkt' (tokenizer) and 'stopwords'
# are both required by Rake-based keyword extraction further down.
# quiet=True keeps the download progress out of the Streamlit page.
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)

# Page title shown at the top of the Streamlit app.
st.title("Exploring Torch, Transformers, Rake, and Others analyzing Text")

# Preset sample texts the user can pick from; each (except 'None') maps to a
# remote .txt file via the url_optionN constants below.
# NOTE(review): 'Apprecitation Letter' is misspelled, but the same misspelled
# key is matched in fetch_text_content — rename both together or neither.
options = ['None','Apprecitation Letter', 'Regret Letter', 'Kindness Tale', 'Lost Melody Tale', 'Twitter Example 1', 'Twitter Example 2']

# Create a dropdown menu to select one of the preset options.
selected_option = st.selectbox("Select a preset option", options)

# Raw GitHub URLs for each preset option (order mirrors `options`, minus 'None').
url_option1 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Appreciation_Letter.txt"
url_option2 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Regret_Letter.txt"
url_option3 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Kindness_Tale.txt"
url_option4 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Lost_Melody_Tale.txt"
url_option5 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_1.txt"
url_option6 = "https://raw.githubusercontent.com/peteciank/public_files/main/Transformers/Twitter_Example_2.txt"
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# Function to fetch text content based on selected option
def fetch_text_content(selected_option):
    """Download and return the sample text for *selected_option*.

    Returns "" for 'None' or any unrecognized option, matching the original
    fallthrough branch. Network failures from ``requests`` propagate to the
    caller (Streamlit surfaces them as an app error).
    """
    # A lookup table replaces the original six near-identical if/elif branches.
    # NOTE: 'Apprecitation Letter' is intentionally misspelled to match the
    # `options` list defined at module level.
    option_urls = {
        'Apprecitation Letter': url_option1,
        'Regret Letter': url_option2,
        'Kindness Tale': url_option3,
        'Lost Melody Tale': url_option4,
        'Twitter Example 1': url_option5,
        'Twitter Example 2': url_option6,
    }
    url = option_urls.get(selected_option)
    if url is None:
        return ""
    # timeout added: the original requests.get had none, so an unreachable
    # GitHub would hang the Streamlit script forever.
    return requests.get(url, timeout=10).text
|
| 45 |
+
|
| 46 |
+
# Fetch text content based on selected option; "" when 'None'/unknown is chosen.
jd = fetch_text_content(selected_option)

# Initialize pipeline for sentiment analysis.
# No checkpoint is pinned, so transformers loads its default sentiment model.
# NOTE(review): both pipelines are rebuilt on every Streamlit rerun (each
# widget interaction) — consider caching with st.cache_resource; confirm
# against the app's deployment constraints.
pipe_sent = pipeline('sentiment-analysis')
# Initialize pipeline for summarization, pinned to facebook/bart-large-cnn.
pipe_summ = pipeline("summarization", model="facebook/bart-large-cnn")
|
| 53 |
|
| 54 |
+
# Function to extract keywords and remove duplicates
|
| 55 |
def extract_keywords(text):
|
| 56 |
r = Rake()
|
| 57 |
r.extract_keywords_from_text(text)
|
|
|
|
| 75 |
seen_phrases.add(phrase)
|
| 76 |
return unique_keywords[:10]
|
| 77 |
|
| 78 |
+
# Display the (possibly preset-filled) text in an editable text area;
# `jd` is the preset content fetched above and acts as the default value.
text = st.text_area('Enter the text to analyze', jd)

# Run all three analyses when the button is pressed.
if st.button("Start Analysis"):
    # --- Sentiment analysis (Transformers pipeline pipe_sent) ---
    # NOTE(review): the 'β' glyphs in the expander titles below appear to be
    # mojibake (likely a checkmark emoji in the original file) — confirm
    # against the upstream repository before editing these strings.
    with st.spinner("Analyzing Sentiment"):
        with st.expander("Sentiment Analysis - β Completed", expanded=False):
            out_sentiment = pipe_sent(text)
            # Pipeline returns a list of {'score': float, 'label': str} dicts;
            # only the first (and only) result is used.
            sentiment_score = out_sentiment[0]['score']
            sentiment_label = out_sentiment[0]['label']
            # NOTE(review): both branches yield the same 'π' character here —
            # almost certainly two distinct emojis garbled by encoding, which
            # makes this ternary a no-op as written; verify upstream.
            sentiment_emoji = 'π' if sentiment_label == 'POSITIVE' else 'π'
            sentiment_text = f"Sentiment Score: {sentiment_score}, Sentiment Label: {sentiment_label.capitalize()} {sentiment_emoji}"
            st.write(sentiment_text)

    # --- Summarization (BART pipeline pipe_summ) ---
    with st.spinner("Summarizing - This may take a while"):
        with st.expander("Summarization - β Completed", expanded=False):
            out_summ = pipe_summ(text)
            summarized_text = out_summ[0]['summary_text']
            st.write(summarized_text)

    # --- Keyword extraction (Rake, via extract_keywords defined above) ---
    with st.spinner("Extracting Keywords"):
        with st.expander("Keywords Extraction - β Completed", expanded=False):
            keywords = extract_keywords(text)
            # keyword[1] is presumably the phrase of a (score, phrase) pair —
            # confirm against extract_keywords' return value.
            keyword_list = [keyword[1] for keyword in keywords]
            st.write(keyword_list)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|