Spaces: Running
Update app.py
app.py
CHANGED
@@ -1,198 +1 @@
-##########################################
-# Step 0: Import required libraries
-##########################################
-import streamlit as st  # For building the web application interface
-from transformers import (
-    pipeline,
-    SpeechT5Processor,
-    SpeechT5ForTextToSpeech,
-    SpeechT5HifiGan,
-    AutoModelForCausalLM,
-    AutoTokenizer
-)  # For sentiment analysis, text-to-speech, and text generation
-from datasets import load_dataset  # For loading datasets (e.g., speaker embeddings)
-import torch  # For tensor operations
-import soundfile as sf  # For saving audio as .wav files
-import sentencepiece  # Required by SpeechT5Processor for tokenization
-
-##########################################
-# Streamlit application title and input
-##########################################
-# Display a colorful, large title in a visually appealing font
-st.markdown(
-    "<h1 style='text-align: center; color: #FF5720; font-size: 50px;'>Just Comment</h1>",
-    unsafe_allow_html=True
-)  # Use HTML and CSS for a custom title design
-
-# Display a smaller, gentle subtitle below the title
-st.markdown(
-    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
-    unsafe_allow_html=True
-)  # Use HTML for a friendly and soft-styled subtitle
-
-# Add a well-designed text area for user input
-text = st.text_area(
-    "Enter your comment",
-    placeholder="Type something here...",
-    height=150,
-    help="Write a comment you would like us to analyze and respond to!"  # Provide a helpful tooltip
-)
-
-##########################################
-# Step 1: Sentiment Analysis Function
-##########################################
-def analyze_dominant_emotion(user_review):
-    """
-    Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
-    """
-    emotion_classifier = pipeline(
-        "text-classification",
-        model="Thea231/jhartmann_emotion_finetuning",
-        return_all_scores=True
-    )  # Load the fine-tuned text classification model from Hugging Face
-
-    emotion_results = emotion_classifier(user_review)[0]  # Perform sentiment analysis on the input text
-    dominant_emotion = max(emotion_results, key=lambda x: x['score'])  # Identify the emotion with the highest confidence
-    return dominant_emotion  # Return the dominant emotion (label and score)
-
-##########################################
-# Step 2: Response Generation Function
-##########################################
-
-
-def response_gen(user_review):
-    """
-    Generate a concise and logical response based on the sentiment of the user's comment.
-    """
-    dominant_emotion = analyze_dominant_emotion(user_review)  # Get the dominant emotion of the user's comment
-    emotion_label = dominant_emotion['label'].lower()  # Extract the emotion label in lowercase format
-
-    # Define response templates for each emotion
-    emotion_prompts = {
-        "anger": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a professional response that:\n"
-            "- Begins with sincere apology and acknowledgment\n"
-            "- Clearly explains solution process with concrete steps\n"
-            "- Offers appropriate compensation/redemption\n"
-            "- Keeps tone humble and solution-focused (3-4 sentences)\n\n"
-            "Response:"
-        ),
-        "disgust": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a response that:\n"
-            "- Immediately acknowledges the product issue\n"
-            "- Explains quality control measures being taken\n"
-            "- Provides clear return/replacement instructions\n"
-            "- Offers goodwill gesture (3-4 sentences)\n\n"
-            "Response:"
-        ),
-        "fear": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a reassuring response that:\n"
-            "- Directly addresses the safety worries\n"
-            "- References relevant certifications/standards\n"
-            "- Offers dedicated support contact\n"
-            "- Provides satisfaction guarantee (3-4 sentences)\n\n"
-            "Response:"
-        ),
-        "joy": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a concise and enthusiastic response that:\n"
-            "- Thanks the customer for their feedback\n"
-            "- Acknowledges both positive and constructive comments\n"
-            "- Invites them to explore loyalty programs\n\n"
-            "Response:"
-        ),
-        "neutral": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a balanced response that:\n"
-            "- Provides additional relevant product information\n"
-            "- Highlights key service features\n"
-            "- Politely requests more detailed feedback\n"
-            "- Maintains professional tone (3-4 sentences)\n\n"
-            "Response:"
-        ),
-        "sadness": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft an empathetic response that:\n"
-            "- Shows genuine understanding of the issue\n"
-            "- Proposes personalized recovery solution\n"
-            "- Offers extended support options\n"
-            "- Maintains positive outlook (3-4 sentences)\n\n"
-            "Response:"
-        ),
-        "surprise": (
-            f"'{user_review}'\n\n"
-            "As a customer service representative, craft a response that:\n"
-            "- Matches customer's positive energy appropriately\n"
-            "- Highlights unexpected product benefits\n"
-            "- Invites to user community/events\n"
-            "- Maintains brand voice (3-4 sentences)\n\n"
-            "Response:"
-        )
-    }
-
-    prompt = emotion_prompts.get(
-        emotion_label,
-        f"Neutral feedback: '{user_review}'\n\nWrite a professional and concise response (50-200 words max).\n\nResponse:"
-    )  # Default to neutral if emotion is not found
-
-    # Load the tokenizer and language model for response generation
-    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load tokenizer for processing text inputs
-    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")  # Load language model for response generation
-
-    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the input prompt
-    outputs = model.generate(
-        **inputs,
-        max_new_tokens=300,
-        min_length=75,  # Ensure concise and complete responses
-        no_repeat_ngram_size=2,  # Avoid repetitive phrases
-        temperature=0.7  # Add randomness for more natural responses
-    )
-    response = tokenizer.decode(outputs[0], skip_special_tokens=True)  # Decode the generated response
-    return response  # Return the response
-
-##########################################
-# Step 3: Text-to-Speech Conversion Function
-##########################################
-def sound_gen(response):
-    """
-    Convert the generated response to speech and save it as a .wav file.
-    """
-    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # Pre-trained processor for TTS
-    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")  # Pre-trained TTS model
-    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")  # Vocoder for generating waveforms
-
-    # Create speaker embedding to match text input
-    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")  # Load speaker embeddings
-    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # Use a default embedding
-
-    inputs = processor(text=response, return_tensors="pt")  # Process text for spectrogram generation
-    inputs["input_ids"] = inputs["input_ids"].to(torch.int32)  # Match tensor format (fix runtime error)
-    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)  # Generate the spectrogram
-
-    with torch.no_grad():
-        speech = vocoder(spectrogram)  # Convert spectrogram to waveform
-
-    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)  # Save as .wav file
-    st.audio("customer_service_response.wav", start_time=0)  # Embed an auto-playing audio player
-
-##########################################
-# Main Function
-##########################################
-def main():
-    """
-    Main function to handle sentiment analysis, response generation, and text-to-speech functionalities.
    """
-    if text:  # Check if the user has entered a comment
-        response = response_gen(text)  # Generate the response
-        st.markdown(
-            f"<p style='color:#3498DB; font-size:20px;'>{response}</p>",
-            unsafe_allow_html=True
-        )  # Display the response with styled formatting
-        sound_gen(response)  # Convert the response to speech and play it
-
-# Execute the main function
-if __name__ == "__main__":
-    main()
+##########################################
+# Step 0: Import required libraries
+##########################################
+import streamlit as st  # For building the web application
+from transformers import (
+    pipeline,
+    SpeechT5Processor,
+    SpeechT5ForTextToSpeech,
+    SpeechT5HifiGan,
+    AutoModelForCausalLM,
+    AutoTokenizer
+)  # For emotion analysis, text-to-speech, and text generation
+from datasets import load_dataset  # For loading datasets (e.g., speaker embeddings)
+import torch  # For tensor operations
+import soundfile as sf  # For saving audio as .wav files
+
+##########################################
+# Streamlit application title and input
+##########################################
+st.title("Comment Reply for You")  # Application title
+st.write("Generate automatic replies for user comments")  # Application description
+text = st.text_area("Enter your comment", "")  # Text input for user to enter comments
+
+##########################################
+# Step 1: Sentiment Analysis Function
+##########################################
+def analyze_dominant_emotion(user_review):
+    """
+    Analyze the dominant emotion in the user's review using a text classification model.
+    """
+    emotion_classifier = pipeline(
+        "text-classification",
+        model="Thea231/jhartmann_emotion_finetuning",
+        return_all_scores=True
+    )  # Load pre-trained emotion classification model
+    emotion_results = emotion_classifier(user_review)[0]  # Get emotion scores for the review
+    dominant_emotion = max(emotion_results, key=lambda x: x['score'])  # Find the emotion with the highest confidence
+    return dominant_emotion
+
+##########################################
+# Step 2: Response Generation Function
+##########################################
+def response_gen(user_review):
+    """
+    Generate a response based on the sentiment of the user's review.
+    """
+    # Use a causal language model (Qwen) to create a response from a templated prompt
+    dominant_emotion = analyze_dominant_emotion(user_review)  # Get the dominant emotion
+    emotion_label = dominant_emotion['label'].lower()  # Extract emotion label
+
+    # Define response templates for each emotion
+    emotion_prompts = {
+        "anger": (
+            "Customer complaint: '{review}'\n\n"
+            "As a customer service representative, write a response that:\n"
+            "- Sincerely apologizes for the issue\n"
+            "- Explains how the issue will be resolved\n"
+            "- Offers compensation where appropriate\n\n"
+            "Response:"
+        ),
+        "joy": (
+            "Customer review: '{review}'\n\n"
+            "As a customer service representative, write a positive response that:\n"
+            "- Thanks the customer for their feedback\n"
+            "- Acknowledges both positive and constructive comments\n"
+            "- Invites them to explore loyalty programs\n\n"
+            "Response:"
+        ),
+        # Add other emotions as needed...
+    }
+
+    # Fall back to a generic template for emotions without a dedicated prompt
+    # (a bare "Neutral" fallback would send a meaningless prompt to the model),
+    # then format the chosen template with the user's review
+    default_prompt = (
+        "Customer comment: '{review}'\n\n"
+        "As a customer service representative, write a brief, professional response.\n\n"
+        "Response:"
+    )
+    prompt = emotion_prompts.get(emotion_label, default_prompt).format(review=user_review)
+
+    # Load the tokenizer and model directly
+    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
+    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")
+    inputs = tokenizer(prompt, return_tensors="pt")  # Tokenize the prompt
+    outputs = model.generate(**inputs, max_new_tokens=100)  # Generate a response
+    input_length = inputs.input_ids.shape[1]  # Length of the input text in tokens
+    response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)  # Decode only the generated text
+    return response
+
+##########################################
+# Step 3: Text-to-Speech Conversion Function
+##########################################
+def sound_gen(response):
+    """
+    Convert the generated response to speech and save as a .wav file.
+    """
+    # Load the pre-trained TTS models
+    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
+    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
+    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
+
+    # Load speaker embeddings (e.g., neutral female voice)
+    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
+    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
+
+    # Process the input text and generate a spectrogram
+    inputs = processor(text=response, return_tensors="pt")
+    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
+
+    # Use the vocoder to generate a waveform
+    with torch.no_grad():
+        speech = vocoder(spectrogram)
+
+    # Save the generated speech as a .wav file
+    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
+    st.audio("customer_service_response.wav")  # Play the audio in Streamlit
+
+##########################################
+# Main Function
+##########################################
+def main():
+    """
+    Main function to orchestrate the workflow of sentiment analysis, response generation, and text-to-speech.
+    """
+    if text:  # Check if the user entered a comment
+        response = response_gen(text)  # Generate a response
+        st.write(f"Generated response: {response}")  # Display the generated response
+        sound_gen(response)  # Convert the response to speech and play it
+
+# Run the main function
+if __name__ == "__main__":
+    main()
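A note on efficiency that applies to both versions: every checkpoint (Qwen, the SpeechT5 processor, model, and vocoder) is loaded inside the functions, so each Streamlit rerun, meaning every widget interaction, pays the full loading cost again. Below is a minimal caching sketch, assuming a Streamlit release that provides st.cache_resource (1.18+); the load_generator helper name is invented for illustration:

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # Load once per process; reruns and sessions reuse the cached objects
def load_generator(model_name: str = "Qwen/Qwen1.5-0.5B"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)  # Tokenizer for the causal LM
    model = AutoModelForCausalLM.from_pretrained(model_name)  # Response-generation model
    return tokenizer, model

tokenizer, model = load_generator()  # First run downloads/loads; later runs hit the cache

The same pattern would apply to the SpeechT5 components loaded in sound_gen.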
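For readers unfamiliar with the classification pipeline: with return_all_scores=True (deprecated in newer transformers releases in favor of top_k=None), it returns one list of label/score dicts per input text, which is why analyze_dominant_emotion indexes [0] before taking the max. A sketch of that selection step, with invented scores:

# Hypothetical output of emotion_classifier(user_review)[0]; scores are made up
emotion_results = [
    {"label": "anger", "score": 0.03},
    {"label": "joy", "score": 0.91},
    {"label": "sadness", "score": 0.06},
]
dominant_emotion = max(emotion_results, key=lambda x: x["score"])  # Highest-confidence entry
print(dominant_emotion["label"].lower())  # -> "joy"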
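Two quirks of the removed version's generate() call are worth flagging. First, temperature only affects sampling, and generate() does not sample unless do_sample=True is set (or the model's generation config enables it), so temperature=0.7 was likely inert. Second, decoder-only models return the prompt tokens followed by the continuation, so decoding outputs[0] in full echoes the prompt into the displayed reply; the new version's slice outputs[0][input_length:] addresses this. A self-contained sketch with both points applied; the example complaint is invented:

from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B")
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B")

prompt = "Customer complaint: 'The box arrived damaged.'\n\nResponse:"  # Invented example
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=100,
    do_sample=True,          # Sampling must be on for temperature to take effect
    temperature=0.7,         # Softens the token distribution
    no_repeat_ngram_size=2,  # Avoid repeating any 2-gram
)
input_length = inputs.input_ids.shape[1]  # Number of prompt tokens
reply = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)  # Continuation only
print(reply)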
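On the text-to-speech side, the cmu-arctic-xvectors dataset holds one 512-dimensional x-vector speaker embedding per utterance; row 7306 is the one used in the Hugging Face SpeechT5 examples, and swapping in a different row changes the synthesized voice. A small sketch:

import torch
from datasets import load_dataset

embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # Add a batch dimension
print(speaker_embeddings.shape)  # torch.Size([1, 512])

One caveat: the SpeechT5 text encoder has a fixed maximum input length, so very long generated replies may need to be synthesized in chunks.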
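Finally, the removed version's comment claims an "auto-playing audio player", but st.audio renders a standard player that the user must start manually. Newer Streamlit releases accept an autoplay flag; a hedged sketch, assuming a Streamlit version (roughly 1.33+) where the parameter exists:

import streamlit as st

# autoplay is assumed available in the installed Streamlit; without it, drop the
# flag and the user presses play manually.
st.audio("customer_service_response.wav", autoplay=True)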