##########################################
# Step 0: Import required libraries
##########################################
import streamlit as st  # For building the web application interface
from transformers import (
    pipeline,
    SpeechT5Processor,
    SpeechT5ForTextToSpeech,
    SpeechT5HifiGan,
    AutoModelForCausalLM,
    AutoTokenizer,
)  # For sentiment analysis, text-to-speech, and text generation
from datasets import load_dataset  # For loading datasets (e.g., speaker embeddings)
import torch  # For tensor operations
import soundfile as sf  # For saving audio as .wav files
import sentencepiece  # Tokenizer dependency required by SpeechT5Processor
##########################################
# Streamlit application title and input
##########################################
# Display a deep blue title in a large, visually appealing font
st.markdown(
    "<h1 style='text-align: center; color: #00008B; font-size: 50px;'>Just Comment</h1>",
    unsafe_allow_html=True
)  # Render the deep blue title
# Display a gentle, warm subtitle below the title
st.markdown(
    "<h3 style='text-align: center; color: #5D6D7E; font-style: italic;'>I'm listening to you, my friend</h3>",
    unsafe_allow_html=True
)  # Render the friendly subtitle
# Provide a text area for user input with a placeholder and tooltip
text = st.text_area(
    "Enter your comment",
    placeholder="Type something here...",
    height=100,
    help="Write a comment you would like us to respond to!"  # Tooltip for guidance
)  # Create the text input area
##########################################
# Step 1: Sentiment Analysis Function (not called at runtime)
##########################################
def analyze_dominant_emotion(user_review):
    """
    Analyze the dominant emotion in the user's comment using a fine-tuned text classification model.
    """
    emotion_classifier = pipeline(
        "text-classification",
        model="Thea231/jhartmann_emotion_finetuning",
        return_all_scores=True
    )  # Load the emotion classification model
    emotion_results = emotion_classifier(user_review)[0]  # Get the emotion scores for the input
    dominant_emotion = max(emotion_results, key=lambda x: x['score'])  # Find the highest-scoring emotion
    return dominant_emotion  # Return the dominant emotion
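# Illustrative (hypothetical) call and result for the helper above; the label set matches
# the emotion_strategies keys used below, and the score is an assumed example value:
#   analyze_dominant_emotion("My package arrived two weeks late and the box was damaged.")
#   -> {'label': 'anger', 'score': 0.93}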
##########################################
# Step 2: Response Generation Functions
##########################################
def prompt_gen(user_review):
    """
    Generate a prompt based on the user's comment and its detected emotion.
    This function is defined but not used, as the response is fixed.
    """
    dominant_emotion = analyze_dominant_emotion(user_review)  # Determine the dominant emotion
    emotion_strategies = {
        "anger": {
            "prompt": (
                "Customer complaint: '{review}'\n\n"
                "As a customer service representative, craft a professional response that:\n"
                "- Begins with sincere apology and acknowledgment\n"
                "- Clearly explains solution process with concrete steps\n"
                "- Offers appropriate compensation/redemption\n"
                "- Keeps tone humble and solution-focused (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "disgust": {
            "prompt": (
                "Customer quality concern: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Immediately acknowledges the product issue\n"
                "- Explains quality control measures being taken\n"
                "- Provides clear return/replacement instructions\n"
                "- Offers goodwill gesture (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "fear": {
            "prompt": (
                "Customer safety concern: '{review}'\n\n"
                "As a customer service representative, craft a reassuring response that:\n"
                "- Directly addresses the safety worries\n"
                "- References relevant certifications/standards\n"
                "- Offers dedicated support contact\n"
                "- Provides satisfaction guarantee (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "joy": {
            "prompt": (
                "Customer review: '{review}'\n\n"
                "As a customer service representative, craft a concise response that:\n"
                "- Specifically acknowledges both positive and constructive feedback\n"
                "- Briefly mentions loyalty/referral programs\n"
                "- Ends with shopping invitation (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "neutral": {
            "prompt": (
                "Customer feedback: '{review}'\n\n"
                "As a customer service representative, craft a balanced response that:\n"
                "- Provides additional relevant product information\n"
                "- Highlights key service features\n"
                "- Politely requests more detailed feedback\n"
                "- Maintains professional tone (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "sadness": {
            "prompt": (
                "Customer disappointment: '{review}'\n\n"
                "As a customer service representative, craft an empathetic response that:\n"
                "- Shows genuine understanding of the issue\n"
                "- Proposes personalized recovery solution\n"
                "- Offers extended support options\n"
                "- Maintains positive outlook (1-3 sentences)\n\n"
                "Response:"
            )
        },
        "surprise": {
            "prompt": (
                "Customer enthusiastic feedback: '{review}'\n\n"
                "As a customer service representative, craft a response that:\n"
                "- Matches customer's positive energy appropriately\n"
                "- Highlights unexpected product benefits\n"
                "- Invites to user community/events\n"
                "- Maintains brand voice (1-3 sentences)\n\n"
                "Response:"
            )
        }
    }  # Mapping of each emotion to its response template
    template = emotion_strategies[dominant_emotion['label'].lower()]["prompt"]  # Select the template for the detected emotion
    prompt = template.format(review=user_review)  # Fill the template with the user's review
    print(f"Prompt generated: {prompt}")  # Debug: print the generated prompt
    return prompt  # Return the constructed prompt
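# Hedged sketch (not called anywhere in this app) of how prompt_gen's output could drive the
# imported AutoTokenizer/AutoModelForCausalLM; the checkpoint name below is an assumption for
# illustration, and any instruction-tuned causal LM could be substituted.
def llm_response_sketch(user_review, model_name="Qwen/Qwen2.5-0.5B-Instruct"):
    """Generate a reply from the emotion-aware prompt with a small causal LM (illustrative only)."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)  # Load the tokenizer for the assumed checkpoint
    model = AutoModelForCausalLM.from_pretrained(model_name)  # Load the causal language model
    inputs = tokenizer(prompt_gen(user_review), return_tensors="pt")  # Build and tokenize the prompt
    output_ids = model.generate(**inputs, max_new_tokens=80)  # Generate a short continuation
    # Decode only the newly generated tokens, skipping the prompt itself
    return tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)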
def response_gen(user_review):
    """
    Generate a response based on the user's comment.
    For this application, always return a fixed response message.
    """
    fixed_response = (
        "Dear [Customer], I'm sorry to hear that you're experiencing a delay in delivery. "
        "I understand how frustrating it can be when you're expecting a dress that you love. "
        "I'd be happy to help you resolve this issue."
    )
    print(f"Response generated: {fixed_response}")  # Debug: print the generated response
    return fixed_response  # Return the fixed response message
##########################################
# Step 3: Text-to-Speech Conversion Function
##########################################
def sound_gen(response):
    """
    Convert the response text to speech, save it as a .wav file,
    and embed an audio player for playback.
    """
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")  # Load the TTS processor
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")  # Load the TTS model
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")  # Load the vocoder for waveform generation
    # Process the full response text (no truncation) for spectrogram generation
    inputs = processor(text=response, return_tensors="pt")  # Tokenize the response text for TTS
    # Use dummy speaker embeddings (zeros) with the dimension SpeechT5 expects (1 x 512)
    speaker_embeddings = torch.zeros(1, 512)  # Placeholder speaker embeddings; a real x-vector gives a natural voice
    spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)  # Generate the speech spectrogram
    with torch.no_grad():
        speech = vocoder(spectrogram)  # Convert the spectrogram to an audio waveform with the vocoder
    sf.write("customer_service_response.wav", speech.numpy(), samplerate=16000)  # Save the waveform as a .wav file
    st.audio("customer_service_response.wav", start_time=0)  # Embed an audio player for the generated speech
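# Hedged sketch of how a real 512-dim x-vector could replace the zero embeddings above;
# the dataset name and row index follow the common SpeechT5 example and are assumptions here.
def load_speaker_embedding_sketch():
    """Return one speaker x-vector from the CMU Arctic x-vector dataset (illustrative only)."""
    embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")  # Download the x-vectors
    return torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)  # Shape (1, 512) for generate_speech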
##########################################
# Main Function
##########################################
def main():
    """
    Orchestrate response generation and text-to-speech conversion:
    display the fixed response and play its audio.
    """
    if text:  # Only respond once the user has entered a comment (although the response is fixed)
        response = response_gen(text)  # Generate the fixed response message
        st.markdown(
            f"<p style='color:#3498DB; font-size:20px;'>{response}</p>",
            unsafe_allow_html=True
        )  # Display the response with styled formatting (only the fixed message is shown)
        sound_gen(response)  # Convert the response to speech and play it
        print(f"Final response output: {response}")  # Debug: print the final response
# Execute the main function when the script is run
if __name__ == "__main__":
    main()  # Call the main function