Update app.py
app.py CHANGED
@@ -10,7 +10,7 @@ from transformers import ( # For text classification, text-to-speech, and text
     AutoModelForCausalLM,
     AutoTokenizer
 )
-from datasets import load_dataset #
+from datasets import load_dataset # For loading speaker embeddings dataset
 import torch # For tensor operations
 import soundfile as sf # For saving audio as .wav files
 import sentencepiece # Required by SpeechT5Processor for tokenization
@@ -18,25 +18,25 @@ import sentencepiece # Required by SpeechT5Processor for tokenization
 ##########################################
 # Streamlit application title and input
 ##########################################
-# Display a deep blue title
+# Display a deep blue title using HTML and CSS
 st.markdown(
-    "<h1 style='text-align: center; color: #00008B; font-size: 50px;'
+    "<h1 style='text-align: center; color: #00008B; font-size: 50px;'>Just Comment</h1>",
     unsafe_allow_html=True
-) # Set deep blue
+) # Set the title in deep blue

 # Display a gentle, warm subtitle below the title
 st.markdown(
-    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend
+    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
     unsafe_allow_html=True
-) # Set
+) # Set the subtitle with warm styling

-#
+# Provide a text area for user input with a placeholder and tooltip
 text = st.text_area(
     "Enter your comment",
     placeholder="Type something here...",
     height=100,
-    help="Write a comment you would like us to respond to!" #
-) # Create text input field
+    help="Write a comment you would like us to respond to!" # Tooltip for guidance
+) # Create the text input field

 ##########################################
 # Step 1: Sentiment Analysis Function
@@ -45,14 +45,17 @@ def analyze_dominant_emotion(user_review):
     """
     Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
     """
+    # Load the fine-tuned sentiment classification model from Hugging Face
     emotion_classifier = pipeline(
         "text-classification",
         model="Thea231/jhartmann_emotion_finetuning",
         return_all_scores=True
-    )
-
-
-
+    )
+    # Get sentiment scores for the input text
+    emotion_results = emotion_classifier(user_review)[0]
+    # Identify the emotion with the highest confidence score
+    dominant_emotion = max(emotion_results, key=lambda x: x['score'])
+    return dominant_emotion # Return the dominant emotion as a dictionary

 ##########################################
 # Step 2: Response Generation Functions
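Note on the classifier call added above: with return_all_scores=True, a Transformers text-classification pipeline returns a list containing one list of {'label', 'score'} dicts per input, so indexing [0] selects the scores for the single comment and max(..., key=...) picks the top emotion. A minimal sketch (the input text and scores here are illustrative, not taken from the commit):

    results = emotion_classifier("The product broke after one day!")
    # results ≈ [[{'label': 'anger', 'score': 0.91}, {'label': 'sadness', 'score': 0.05}, ...]]
    dominant = max(results[0], key=lambda x: x['score'])  # e.g. {'label': 'anger', 'score': 0.91}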
@@ -61,9 +64,9 @@ def prompt_gen(user_review):
     """
     Generate the text generation prompt based on the user's comment and detected emotion.
     """
-    #
-    dominant_emotion = analyze_dominant_emotion(user_review)
-    # Define
+    # Determine the dominant emotion from the user's comment
+    dominant_emotion = analyze_dominant_emotion(user_review)
+    # Define prompt templates for seven emotions
     emotion_strategies = {
         "anger": {
             "prompt": (
@@ -72,7 +75,7 @@ def prompt_gen(user_review):
                 "- Begins with a sincere apology and acknowledgment.\n"
                 "- Clearly explains a solution process with concrete steps.\n"
                 "- Offers appropriate compensation or redemption.\n"
-                "- Keeps a humble
+                "- Keeps a humble, solution-focused tone (1-3 sentences).\n\n"
                 "Response:"
             )
         },
@@ -81,7 +84,7 @@ def prompt_gen(user_review):
                 "Customer quality concern: '{review}'\n\n"
                 "As a customer service representative, craft a response that:\n"
                 "- Immediately acknowledges the product issue.\n"
-                "- Explains
+                "- Explains quality control measures being taken.\n"
                 "- Provides clear return/replacement instructions.\n"
                 "- Offers a goodwill gesture (1-3 sentences).\n\n"
                 "Response:"
@@ -91,9 +94,9 @@ def prompt_gen(user_review):
             "prompt": (
                 "Customer safety concern: '{review}'\n\n"
                 "As a customer service representative, craft a reassuring response that:\n"
-                "- Directly addresses
+                "- Directly addresses safety worries.\n"
                 "- References relevant certifications or standards.\n"
-                "- Offers
+                "- Offers dedicated support contact.\n"
                 "- Provides a satisfaction guarantee (1-3 sentences).\n\n"
                 "Response:"
             )
@@ -104,7 +107,7 @@ def prompt_gen(user_review):
                 "As a customer service representative, craft a concise response that:\n"
                 "- Thanks the customer for their feedback.\n"
                 "- Acknowledges both positive and constructive points.\n"
-                "- Invites
+                "- Invites exploration of loyalty or referral programs (1-3 sentences).\n\n"
                 "Response:"
             )
         },
@@ -112,9 +115,9 @@ def prompt_gen(user_review):
             "prompt": (
                 "Customer feedback: '{review}'\n\n"
                 "As a customer service representative, craft a balanced response that:\n"
-                "- Provides
+                "- Provides relevant product information.\n"
                 "- Highlights key service features.\n"
-                "- Politely requests
+                "- Politely requests detailed feedback.\n"
                 "- Maintains a professional tone (1-3 sentences).\n\n"
                 "Response:"
             )
@@ -136,37 +139,40 @@ def prompt_gen(user_review):
                 "As a customer service representative, craft a response that:\n"
                 "- Matches the customer's positive energy.\n"
                 "- Highlights unexpected product benefits.\n"
-                "- Invites the customer to join community events
+                "- Invites the customer to join community events.\n"
                 "- Maintains the brand's voice (1-3 sentences).\n\n"
                 "Response:"
             )
         }
-    } #
-    #
+    } # End dictionary of prompt templates
+    # Select the template based on detected emotion; default to neutral if not found
     template = emotion_strategies.get(dominant_emotion["label"].lower(), emotion_strategies["neutral"])["prompt"]
-    prompt = template.format(review=user_review) #
-    print(f"Generated prompt: {prompt}") # Debug print using f-string
-    return prompt # Return the
+    prompt = template.format(review=user_review) # Format the prompt with the user's comment
+    print(f"Generated prompt: {prompt}") # Debug: print the generated prompt using an f-string
+    return prompt # Return the text generation prompt

 def response_gen(user_review):
     """
-    Generate a response using text generation based on the user's comment.
+    Generate a response using text generation based on the user's comment and detected emotion.
     """
-
+    # Get the text generation prompt based on the user's comment and its dominant emotion
+    prompt = prompt_gen(user_review)
     # Load the tokenizer and language model for text generation
-    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B") # Load tokenizer
-    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B") # Load causal language model
+    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-0.5B") # Load tokenizer
+    model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-0.5B") # Load causal language model
     inputs = tokenizer(prompt, return_tensors="pt") # Tokenize the prompt
+    # Generate a response with constraints to ensure a concise and complete answer
     outputs = model.generate(
         **inputs,
-        max_new_tokens=100, # Allow up to 100 new tokens for the answer
-        min_length=30,
+        max_new_tokens=100, # Allow up to 100 new tokens for the generated answer
+        min_length=30, # Ensure at least 30 tokens in the generated response
         no_repeat_ngram_size=2, # Avoid repeated phrases
-        temperature=0.7
-    )
-    input_length = inputs.input_ids.shape[1] #
-
-
+        temperature=0.7 # Moderate randomness for creative responses
+    )
+    input_length = inputs.input_ids.shape[1] # Get the length of the input prompt
+    # Decode only the generated text (after the prompt)
+    response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=True)
+    print(f"Generated response: {response}") # Debug: print the generated response using an f-string
     return response # Return the generated response

 ##########################################
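Two details worth noting in the response_gen changes above. First, outputs[0][input_length:] works because a causal LM's generate() returns the prompt token ids followed by the newly generated ids, so slicing at the prompt length keeps only the answer. Second, in Transformers the temperature argument only takes effect when sampling is enabled; the commit does not set do_sample, so a sampling variant would be an assumption along these lines:

    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        min_length=30,
        no_repeat_ngram_size=2,
        do_sample=True,   # assumed here; required for temperature to influence decoding
        temperature=0.7,
    )
    response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)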
@@ -176,20 +182,22 @@ def sound_gen(response):
     """
     Convert the generated response to speech and embed an auto-playing audio player.
     """
-    # Load SpeechT5 processor, TTS model, and vocoder
+    # Load the SpeechT5 processor, TTS model, and vocoder for audio synthesis
     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts") # Load TTS processor
     model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts") # Load TTS model
     vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan") # Load vocoder
-    # Process the
-    inputs = processor(text=response, return_tensors="pt") #
-    #
-    speaker_embeddings = torch.zeros(1,
-    spectrogram
+    # Process the entire generated response text for TTS
+    inputs = processor(text=response, return_tensors="pt") # Tokenize and process the response
+    # Create a dummy speaker embedding with the expected dimension (1 x 1280)
+    speaker_embeddings = torch.zeros(1, 1280, dtype=torch.float32) # Dummy embedding to avoid shape mismatches
+    # Generate the speech spectrogram using the input tokens and dummy speaker embeddings
+    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
     with torch.no_grad():
-        speech = vocoder(spectrogram) # Convert spectrogram
-    # Save the audio as a .wav file with 16kHz sampling rate
-    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
-
+        speech = vocoder(spectrogram) # Convert the spectrogram into an audio waveform
+    # Save the audio waveform as a .wav file with a 16kHz sampling rate
+    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)
+    # Embed an auto-playing audio player in the app to play the full response
+    st.audio("customer_service_response.wav", start_time=0)

 ##########################################
 # Main Function
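The load_dataset import added in the first hunk is commented "For loading speaker embeddings dataset", yet sound_gen above substitutes a zero tensor for the speaker embedding. For reference, the standard SpeechT5 recipe in the Transformers documentation loads a 512-dimensional x-vector from the Matthijs/cmu-arctic-xvectors dataset; a sketch under that assumption (speaker index 7306 is the commonly cited example, not something this commit specifies):

    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker_embeddings = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # shape (1, 512)
    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)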
@@ -197,16 +205,16 @@ def sound_gen(response):
 def main():
     """
     Main function to orchestrate text generation and text-to-speech conversion.
-    It displays only the generated response and plays its audio.
+    It displays only the generated response and plays its audio without extra information.
     """
-    if text: #
-        response = response_gen(text) # Generate a response
+    if text: # Only proceed if the user has entered a comment
+        response = response_gen(text) # Generate a response based on text generation and emotion detection
         st.markdown(
             f"<p style='color:#3498DB; font-size:20px;'>{response}</p>",
             unsafe_allow_html=True
-        ) # Display the
-        sound_gen(response) # Convert the generated response to speech and embed the audio player
-        print(f"Final generated response: {response}") # Debug print using f-string
+        ) # Display the response in styled formatting (only the final answer)
+        sound_gen(response) # Convert the full generated response to speech and embed the audio player
+        print(f"Final generated response: {response}") # Debug: print the final response using an f-string

 # Execute the main function when the script is run
 if __name__ == "__main__":