Commit · 422e708
Parent(s): 0711be9
Add Conversation Analysis Dashboard for Hugging Face Spaces
- Add conversation_analysis_app.py as main entry point
- Add conversation_analysis_ui.py with Gradio interface
- Update MongoDB configuration to use keshavchhaparia instance
- Add requirements.txt and README.md for HF Spaces
- Add migration script for MongoDB data
- Update .gitignore and add environment template
- .gitignore +2 -3
- Makefile +3 -1
- README.md +31 -53
- configs/conversation_analysis_app.yaml +40 -0
- conversation_analysis_app.py +45 -0
- env_template.txt +21 -0
- migrate_mongodb_data.py +139 -0
- requirements.txt +6 -15
- src/second_brain_online/application/ui/conversation_analysis_ui.py +376 -0
- src/second_brain_online/config.py +1 -1
.gitignore CHANGED

@@ -1,4 +1,4 @@
-# Environment
+# Environment variables
 .env
 .env.local
 .env.production
@@ -27,7 +27,6 @@ wheels/

 # Virtual environments
 .venv/
-.venv-online/
 venv/
 ENV/
 env/
@@ -48,4 +47,4 @@ logs/

 # Temporary files
 *.tmp
-*.temp
+*.temp
Makefile CHANGED

@@ -45,11 +45,13 @@ run_agent_app: check-config
 	uv run python -m tools.app --retriever-config-path=$(RETRIEVER_CONFIG) --ui

 run_agent_query: check-config
-	uv run python -m tools.app --retriever-config-path=$(RETRIEVER_CONFIG) --query "What
+	uv run python -m tools.app --retriever-config-path=$(RETRIEVER_CONFIG) --query "What pricing objections have been raised?"

 evaluate_agent: check-config
 	uv run python -m tools.evaluate_app --retriever-config-path=$(RETRIEVER_CONFIG)

+run_conversation_analysis_ui: # Launch Conversation Analysis Dashboard
+	uv run python conversation_analysis_app.py

 # --- QA ---

README.md CHANGED

@@ -1,73 +1,51 @@
 ---
-title:
-emoji:
+title: Conversation Analysis Dashboard
+emoji: 🎯
 colorFrom: blue
 colorTo: purple
 sdk: gradio
-sdk_version:
-app_file:
+sdk_version: 4.0.0
+app_file: conversation_analysis_app.py
 pinned: false
 license: mit
+short_description: AI-powered conversation analysis with insights and summaries
 ---

-#
+# 🎯 Conversation Analysis Dashboard

-
+An AI-powered dashboard for analyzing customer conversations with intelligent insights, summaries, and follow-up email generation.

 ## Features

--
--
--
--
--
+- **📊 Conversation Analysis**: View and analyze customer conversations with quality scores and sentiment analysis
+- **💡 AI Insights**: Get marketing insights and key findings from conversations
+- **📧 Follow-up Emails**: Generate contextual follow-up emails based on conversation analysis
+- **🔍 Smart Filtering**: Filter conversations by quality score, sentiment, and search terms
+- **📈 Real-time Updates**: Dynamic table updates with conversation details

-##
+## How to Use

-1.
-2.
-3. View
-
+1. **View Conversations**: Browse through analyzed conversations in the main table
+2. **Filter Data**: Use the quality score slider, sentiment dropdown, and search box to filter conversations
+3. **View Details**: Click on any conversation row to see detailed analysis including:
+   - Contextual summary
+   - Marketing insights with quotes and sentiment
+   - Generated follow-up email
+4. **Refresh Data**: Use the refresh button to reload the latest conversation data

-##
+## Technology Stack

--
--
--
--
+- **Frontend**: Gradio for interactive UI
+- **Backend**: Python with MongoDB for data storage
+- **AI**: OpenAI GPT models for conversation analysis
+- **Database**: MongoDB Atlas for conversation and RAG data storage

-##
+## Data Sources

-
-- `
-- `
-- `MONGODB_DATABASE_NAME`: Database name (default: second_brain_course)
-- `MONGODB_COLLECTION_NAME`: Collection name (default: rag)
-- `COMET_API_KEY`: Comet ML API key for tracking
-- `COMET_PROJECT`: Project name (default: second_brain_course)
-- `RETRIEVER_CONFIG_PATH`: Path to retriever config (default: configs/compute_rag_vector_index_openai_contextual_simple.yaml)
+The dashboard analyzes conversations from MongoDB collections:
+- `test_intercom_data`: Main conversation data with analysis results
+- `rag_intercom`: RAG index for semantic search and context retrieval

-##
+## Getting Started

-
-- **Embeddings**: OpenAI text-embedding-3-small for document embeddings
-- **LLM**: GPT-4o-mini for response generation
-- **UI**: Custom Gradio interface with enhanced formatting
-- **Tools**: MongoDB retriever and final answer tools
-
-## Local Development
-
-```bash
-# Install dependencies
-uv sync
-
-# Run the agent
-make run_agent_app
-
-# Or run directly
-python app.py
-```
-
-## License
-
-MIT License
+The app will automatically connect to the configured MongoDB instance and load conversation data. No additional setup required!
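As a rough illustration of the README's "Data Sources" section, the sketch below (not part of the commit) pulls analyzed conversations out of `test_intercom_data` with pymongo; the field names mirror the query used later in `conversation_analysis_ui.py`, and the URI is assumed to come from the environment.

```python
# Sketch: fetching analyzed conversations from the test_intercom_data collection.
# Assumes MONGODB_URI is set; field names mirror conversation_analysis_ui.py.
import os

from pymongo import MongoClient

client = MongoClient(os.environ["MONGODB_URI"])
collection = client["second_brain_course"]["test_intercom_data"]

# Only documents that already carry analysis results, capped for display.
docs = collection.find(
    {"conversation_analysis": {"$exists": True, "$ne": None}},
    limit=100,
)
for doc in docs:
    summary = doc["conversation_analysis"].get("aggregated_contextual_summary", "")
    print(doc.get("content_quality_score"), summary[:80])
```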
configs/conversation_analysis_app.yaml ADDED

@@ -0,0 +1,40 @@
+# Conversation Analysis App Configuration
+
+mongodb:
+  uri: "mongodb+srv://contextdb:HOqIgSH01CoEiMb1@cluster0.d9cmff.mongodb.net/"
+  database: "second_brain_course"
+  collection: "test_intercom_data"
+
+ui:
+  title: "Conversation Analysis Dashboard"
+  theme: "soft"
+  default_limit: 100
+  max_limit: 1000
+
+filters:
+  quality_score:
+    min: 0.0
+    max: 1.0
+    step: 0.01
+  sentiment_options: ["All", "Positive", "Negative", "Neutral", "Confused"]
+  search_fields: ["content", "conversation_analysis.aggregated_contextual_summary"]
+
+display:
+  summary_truncate: 100
+  date_format: "%Y-%m-%d %H:%M"
+  quality_colors:
+    high: "#28a745"    # Green
+    medium: "#ffc107"  # Yellow
+    low: "#dc3545"     # Red
+
+# MongoDB query optimization
+query:
+  default_limit: 100
+  max_limit: 1000
+  batch_size: 50
+
+# Error handling
+error_handling:
+  show_errors: true
+  log_level: "INFO"
+  retry_attempts: 3
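Nothing in this commit actually reads this YAML yet (the UI below hardcodes its connection settings), so the following is only a sketch, assuming PyYAML is available, of how the app could consume configs/conversation_analysis_app.yaml instead of hardcoded values.

```python
# Sketch: loading configs/conversation_analysis_app.yaml (assumes PyYAML is installed;
# nothing in this commit wires the config file into the app yet).
from pathlib import Path

import yaml


def load_app_config(path: str = "configs/conversation_analysis_app.yaml") -> dict:
    """Read the dashboard configuration into a plain dict."""
    with Path(path).open("r", encoding="utf-8") as f:
        return yaml.safe_load(f)


if __name__ == "__main__":
    config = load_app_config()
    # e.g. pass these into ConversationAnalysisUI instead of the hardcoded values
    print(config["mongodb"]["database"], config["ui"]["default_limit"])
```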
conversation_analysis_app.py ADDED

@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+"""
+Hugging Face Space app for Conversation Analysis Dashboard.
+
+This app displays conversation analysis results in a tabular format,
+showing insights, summaries, and follow-up emails for all conversations
+from the test_intercom_data collection.
+"""
+
+import os
+import sys
+from pathlib import Path
+
+# Add paths
+sys.path.append('.')
+sys.path.append('src')
+
+from second_brain_online.application.ui.conversation_analysis_ui import ConversationAnalysisUI
+
+def main():
+    """Main function for HF Space deployment."""
+    print("🚀 Starting Conversation Analysis Dashboard...")
+    print("📊 Loading conversation analysis data from MongoDB...")
+
+    try:
+        # Initialize UI
+        ui = ConversationAnalysisUI()
+
+        print("✅ UI initialized successfully")
+        print("🌐 Launching Gradio interface...")
+
+        # Launch the interface
+        ui.launch(
+            server_name="0.0.0.0",
+            server_port=7860,
+            share=False,
+            show_error=True
+        )
+
+    except Exception as e:
+        print(f"❌ Error starting the application: {e}")
+        raise
+
+if __name__ == "__main__":
+    main()
env_template.txt ADDED

@@ -0,0 +1,21 @@
+# Copy this to .env and fill in your actual values
+
+# MongoDB Configuration
+MONGODB_URI=mongodb+srv://keshavchhaparia:bUSBXeVCGWDyQhDG@saaslabs.awtivxf.mongodb.net/
+MONGODB_DATABASE_NAME=second_brain_course
+MONGODB_COLLECTION_NAME=rag_intercom
+
+# OpenAI Configuration
+OPENAI_API_KEY=your_openai_api_key_here
+
+# Comet ML & Opik Configuration
+COMET_API_KEY=yPmLa7W6QyBODw1Pnfg9jqr7E
+COMET_PROJECT=second_brain_course
+
+# Hugging Face Configuration
+HUGGINGFACE_ACCESS_TOKEN=your_huggingface_token_here
+USE_HUGGINGFACE_DEDICATED_ENDPOINT=false
+HUGGINGFACE_DEDICATED_ENDPOINT=
+
+# Model Configuration
+OPENAI_MODEL_ID=gpt-4o
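A minimal sketch of how these variables would typically be consumed once the template is copied to `.env`, assuming python-dotenv (which is in the updated requirements.txt); the repo's own settings object lives in src/second_brain_online/config.py.

```python
# Sketch: reading the variables from env_template.txt after copying it to .env.
# Assumes python-dotenv (listed in requirements.txt).
import os

from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

mongodb_uri = os.environ["MONGODB_URI"]
database_name = os.getenv("MONGODB_DATABASE_NAME", "second_brain_course")
collection_name = os.getenv("MONGODB_COLLECTION_NAME", "rag_intercom")
print(f"Using {database_name}.{collection_name}")
```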
migrate_mongodb_data.py ADDED

@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+"""
+Script to migrate test_intercom_data from contextdb instance to keshavchhaparia instance.
+"""
+
+import sys
+from pymongo import MongoClient
+from loguru import logger
+
+# Source MongoDB (contextdb instance)
+SOURCE_URI = "mongodb+srv://contextdb:HOqIgSH01CoEiMb1@cluster0.d9cmff.mongodb.net/"
+SOURCE_DB = "second_brain_course"
+SOURCE_COLLECTION = "test_intercom_data"
+
+# Target MongoDB (keshavchhaparia instance)
+TARGET_URI = "mongodb+srv://keshavchhaparia:bUSBXeVCGWDyQhDG@saaslabs.awtivxf.mongodb.net/"
+TARGET_DB = "second_brain_course"
+TARGET_COLLECTION = "test_intercom_data"
+
+def migrate_data():
+    """Migrate test_intercom_data collection from source to target MongoDB."""
+
+    logger.info("🚀 Starting MongoDB data migration...")
+
+    # Connect to source MongoDB
+    logger.info(f"📡 Connecting to source MongoDB: {SOURCE_URI}")
+    try:
+        source_client = MongoClient(SOURCE_URI)
+        source_db = source_client[SOURCE_DB]
+        source_collection = source_db[SOURCE_COLLECTION]
+        logger.info("✅ Connected to source MongoDB")
+    except Exception as e:
+        logger.error(f"❌ Failed to connect to source MongoDB: {e}")
+        return False
+
+    # Connect to target MongoDB
+    logger.info(f"📡 Connecting to target MongoDB: {TARGET_URI}")
+    try:
+        target_client = MongoClient(TARGET_URI)
+        target_db = target_client[TARGET_DB]
+        target_collection = target_db[TARGET_COLLECTION]
+        logger.info("✅ Connected to target MongoDB")
+    except Exception as e:
+        logger.error(f"❌ Failed to connect to target MongoDB: {e}")
+        return False
+
+    try:
+        # Get document count from source
+        source_count = source_collection.count_documents({})
+        logger.info(f"📊 Source collection has {source_count} documents")
+
+        if source_count == 0:
+            logger.warning("⚠️ Source collection is empty, nothing to migrate")
+            return True
+
+        # Delete existing target collection
+        logger.info(f"🗑️ Deleting existing target collection: {TARGET_COLLECTION}")
+        target_collection.drop()
+        logger.info("✅ Target collection deleted")
+
+        # Copy documents from source to target
+        logger.info("📋 Copying documents from source to target...")
+
+        # Process in batches to avoid memory issues
+        batch_size = 100
+        total_copied = 0
+
+        for skip in range(0, source_count, batch_size):
+            # Get batch of documents
+            documents = list(source_collection.find().skip(skip).limit(batch_size))
+
+            if documents:
+                # Insert batch into target
+                target_collection.insert_many(documents)
+                total_copied += len(documents)
+                logger.info(f"📦 Copied batch: {len(documents)} documents (Total: {total_copied}/{source_count})")
+
+        # Verify migration
+        target_count = target_collection.count_documents({})
+        logger.info(f"✅ Migration completed! Target collection has {target_count} documents")
+
+        if target_count == source_count:
+            logger.info("🎉 Migration successful - document counts match!")
+            return True
+        else:
+            logger.error(f"❌ Migration failed - document count mismatch: {target_count} vs {source_count}")
+            return False
+
+    except Exception as e:
+        logger.error(f"❌ Migration failed: {e}")
+        return False
+
+    finally:
+        # Close connections
+        source_client.close()
+        target_client.close()
+        logger.info("🔌 MongoDB connections closed")
+
+def verify_migration():
+    """Verify the migration was successful."""
+    logger.info("🔍 Verifying migration...")
+
+    try:
+        # Connect to target MongoDB
+        target_client = MongoClient(TARGET_URI)
+        target_db = target_client[TARGET_DB]
+        target_collection = target_db[TARGET_COLLECTION]
+
+        # Get sample documents
+        sample_docs = list(target_collection.find().limit(3))
+        logger.info(f"📄 Sample documents in target collection:")
+
+        for i, doc in enumerate(sample_docs, 1):
+            conversation_id = doc.get('metadata', {}).get('properties', {}).get('conversation_id', 'N/A')
+            has_analysis = 'conversation_analysis' in doc
+            quality_score = doc.get('content_quality_score', 'N/A')
+            logger.info(f"  {i}. Conversation ID: {conversation_id}, Has Analysis: {has_analysis}, Quality: {quality_score}")
+
+        target_client.close()
+        logger.info("✅ Verification completed")
+
+    except Exception as e:
+        logger.error(f"❌ Verification failed: {e}")
+
+if __name__ == "__main__":
+    logger.info("=" * 60)
+    logger.info("🚀 MongoDB Data Migration Script")
+    logger.info("=" * 60)
+
+    # Run migration
+    success = migrate_data()
+
+    if success:
+        # Verify migration
+        verify_migration()
+        logger.info("🎉 Migration completed successfully!")
+    else:
+        logger.error("❌ Migration failed!")
+        sys.exit(1)
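Note that migrate_data() drops the target collection before copying, so a read-only pre-flight check is worth running first. The snippet below is a sketch (not part of the commit) that only compares document counts on the two instances; it imports the connection constants from the migration script, which is safe because the migration itself runs under the __main__ guard.

```python
# Sketch (not in the commit): read-only pre-flight check before running the
# migration, since migrate_data() drops the target collection first.
from pymongo import MongoClient

from migrate_mongodb_data import (
    SOURCE_URI, SOURCE_DB, SOURCE_COLLECTION,
    TARGET_URI, TARGET_DB, TARGET_COLLECTION,
)

source = MongoClient(SOURCE_URI)[SOURCE_DB][SOURCE_COLLECTION]
target = MongoClient(TARGET_URI)[TARGET_DB][TARGET_COLLECTION]
print(f"source={source.count_documents({})} target={target.count_documents({})}")
```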
requirements.txt CHANGED

@@ -1,15 +1,6 @@
-
-
-
-
-
-
-langchain-mongodb>=0.4.0
-langchain-openai>=0.3.1
-langchain-core>=0.3.30
-gradio>=5.12.0
-smolagents==1.4.1
-opik>=1.4.2
-comet_ml>=3.47.6
-langchain-huggingface>=0.1.2
-huggingface-hub>=0.27.1
+gradio>=4.0.0
+pymongo>=4.0.0
+pandas>=1.5.0
+python-dotenv>=1.0.0
+opik>=0.1.0
+loguru>=0.7.0
src/second_brain_online/application/ui/conversation_analysis_ui.py ADDED

@@ -0,0 +1,376 @@
+import gradio as gr
+import pandas as pd
+from pymongo import MongoClient
+from typing import List, Dict, Any, Optional, Tuple
+from datetime import datetime
+import json
+import re
+
+class ConversationAnalysisUI:
+    """Gradio UI for displaying conversation analysis results."""
+
+    def __init__(self):
+        # Use keshavchhaparia MongoDB instance (same as RAG system)
+        self.mongodb_uri = "mongodb+srv://keshavchhaparia:bUSBXeVCGWDyQhDG@saaslabs.awtivxf.mongodb.net/"
+        self.database_name = "second_brain_course"
+        self.collection_name = "test_intercom_data"
+
+        self.setup_mongodb()
+        self.setup_ui()
+
+    def setup_mongodb(self):
+        """Initialize MongoDB connection."""
+        try:
+            self.client = MongoClient(self.mongodb_uri)
+            self.db = self.client[self.database_name]
+            self.collection = self.db[self.collection_name]
+            print(f"✅ Connected to MongoDB: {self.database_name}.{self.collection_name}")
+        except Exception as e:
+            print(f"❌ MongoDB connection failed: {e}")
+            raise
+
+    def load_conversations(self,
+                           quality_min: float = 0.0,
+                           quality_max: float = 1.0,
+                           sentiment: str = "All",
+                           search_text: str = "",
+                           limit: int = 100) -> pd.DataFrame:
+        """Load and filter conversations."""
+        try:
+            # Build query
+            query = {
+                'conversation_analysis': {'$exists': True, '$ne': None},
+                'content_quality_score': {'$gte': quality_min, '$lte': quality_max}
+            }
+
+            # Add sentiment filter
+            if sentiment != "All":
+                query['conversation_analysis.aggregated_marketing_insights.quotes.sentiment'] = sentiment
+
+            # Add text search
+            if search_text:
+                query['$or'] = [
+                    {'content': {'$regex': search_text, '$options': 'i'}},
+                    {'conversation_analysis.aggregated_contextual_summary': {'$regex': search_text, '$options': 'i'}}
+                ]
+
+            # Fetch documents
+            docs = list(self.collection.find(query).limit(limit))
+
+            # Convert to DataFrame
+            data = []
+            seen_conversation_ids = set()
+
+            for doc in docs:
+                conversation_id = doc.get('metadata', {}).get('properties', {}).get('conversation_id', 'N/A')
+
+                # Skip duplicates
+                if conversation_id in seen_conversation_ids:
+                    continue
+                seen_conversation_ids.add(conversation_id)
+
+                analysis = doc.get('conversation_analysis', {})
+                insights = analysis.get('aggregated_marketing_insights', {})
+                quotes = insights.get('quotes', [])
+
+                # Extract primary sentiment
+                primary_sentiment = quotes[0].get('sentiment', 'Unknown') if quotes else 'Unknown'
+
+                # Format date
+                created_at = analysis.get('created_at', '')
+                if isinstance(created_at, str):
+                    try:
+                        # Parse and format date
+                        dt = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
+                        formatted_date = dt.strftime('%b %d, %Y %H:%M')
+                    except:
+                        formatted_date = created_at
+                elif hasattr(created_at, 'strftime'):
+                    formatted_date = created_at.strftime('%b %d, %Y %H:%M')
+                else:
+                    formatted_date = str(created_at)
+
+                # Get full summary without truncation
+                full_summary = analysis.get('aggregated_contextual_summary', 'No summary available')
+
+                # Get a simple insights summary for the table
+                marketing_insights = analysis.get('aggregated_marketing_insights', {})
+                insights_count = 0
+
+                if isinstance(marketing_insights, dict):
+                    quotes_count = len(marketing_insights.get('quotes', []))
+                    findings_count = len(marketing_insights.get('key_findings', []))
+                    insights_count = quotes_count + findings_count
+
+                insights_text = f"{insights_count} insights available" if insights_count > 0 else "No insights available"
+
+                data.append({
+                    'conversation_id': conversation_id,
+                    'quality_score': round(doc.get('content_quality_score', 0.0), 2),
+                    'sentiment': primary_sentiment,
+                    'summary': full_summary,
+                    'insights': insights_text,
+                    'date': formatted_date
+                })
+
+            return pd.DataFrame(data)
+
+        except Exception as e:
+            print(f"❌ Error loading conversations: {e}")
+            return pd.DataFrame()
+
+    def get_conversation_details(self, conversation_id: str) -> str:
+        """Get detailed analysis for a specific conversation."""
+        try:
+            doc = self.collection.find_one({
+                'metadata.properties.conversation_id': conversation_id,
+                'conversation_analysis': {'$exists': True}
+            })
+
+            if not doc:
+                return "<p>❌ Conversation not found</p>"
+
+            analysis = doc.get('conversation_analysis', {})
+            insights = analysis.get('aggregated_marketing_insights', {})
+
+            # Format the HTML content
+            html_content = f"""
+            <div class="conversation-details">
+                <h3>📊 Conversation Analysis: {conversation_id}</h3>
+
+                <div class="section">
+                    <h4>📝 Summary (Contextual Summary)</h4>
+                    <div class="content-box">
+                        <p>{analysis.get('aggregated_contextual_summary', 'No summary available')}</p>
+                    </div>
+                </div>
+
+                <div class="section">
+                    <h4>💡 Insights</h4>
+            """
+
+            # Add quotes
+            quotes = insights.get('quotes', [])
+            if quotes:
+                html_content += "<h5>💬 Key Quotes:</h5><ul>"
+                for i, quote in enumerate(quotes, 1):
+                    sentiment_class = f"sentiment-{quote.get('sentiment', 'neutral').lower()}"
+                    html_content += f"""
+                    <li>
+                        <div class="quote-item">
+                            <p><strong>Quote {i}:</strong> "{quote.get('quote', '')}"</p>
+                            <p><strong>Context:</strong> {quote.get('context', '')}</p>
+                            <p><strong>Sentiment:</strong> <span class="{sentiment_class}">{quote.get('sentiment', 'Unknown')}</span></p>
+                        </div>
+                    </li>
+                    """
+                html_content += "</ul>"
+
+            # Add key findings
+            findings = insights.get('key_findings', [])
+            if findings:
+                html_content += "<h5>🔎 Key Findings:</h5><ul>"
+                for i, finding in enumerate(findings, 1):
+                    impact_class = f"impact-{finding.get('impact', 'medium').lower()}"
+                    html_content += f"""
+                    <li>
+                        <div class="finding-item">
+                            <p><strong>Finding {i}:</strong> {finding.get('finding', '')}</p>
+                            <p><strong>Evidence:</strong> {finding.get('evidence', '')}</p>
+                            <p><strong>Impact:</strong> <span class="{impact_class}">{finding.get('impact', 'Unknown')}</span></p>
+                        </div>
+                    </li>
+                    """
+                html_content += "</ul>"
+
+            # Add follow-up email
+            follow_up_email = analysis.get('follow_up_email', '')
+            if follow_up_email:
+                html_content += f"""
+                <div class="section">
+                    <h4>📧 Follow-up Email</h4>
+                    <div class="content-box">
+                        <pre>{follow_up_email}</pre>
+                    </div>
+                </div>
+                """
+
+            html_content += "</div>"
+
+            return html_content
+
+        except Exception as e:
+            return f"<p>❌ Error loading conversation details: {e}</p>"
+
+    def setup_ui(self):
+        """Setup the Gradio interface."""
+        with gr.Blocks(
+            title="Conversation Analysis Dashboard",
+            theme=gr.themes.Soft(),
+            css="""
+            .conversation-details {
+                max-width: 100%;
+                padding: 20px;
+            }
+            .section {
+                margin: 20px 0;
+                padding: 15px;
+                border: 1px solid #e0e0e0;
+                border-radius: 8px;
+                background-color: #f8f9fa;
+            }
+            .content-box {
+                background-color: white;
+                padding: 15px;
+                border-radius: 5px;
+                border: 1px solid #dee2e6;
+                margin: 10px 0;
+            }
+            .quote-item, .finding-item {
+                margin: 10px 0;
+                padding: 10px;
+                background-color: white;
+                border-radius: 5px;
+                border-left: 4px solid #007bff;
+            }
+            .sentiment-positive {
+                background-color: #d4edda;
+                color: #155724;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .sentiment-negative {
+                background-color: #f8d7da;
+                color: #721c24;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .sentiment-neutral {
+                background-color: #d1ecf1;
+                color: #0c5460;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .sentiment-confused {
+                background-color: #fff3cd;
+                color: #856404;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .impact-high {
+                background-color: #f8d7da;
+                color: #721c24;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .impact-medium {
+                background-color: #fff3cd;
+                color: #856404;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .impact-low {
+                background-color: #d4edda;
+                color: #155724;
+                padding: 2px 8px;
+                border-radius: 4px;
+                font-weight: bold;
+            }
+            .quality-high { color: #28a745; font-weight: bold; }
+            .quality-medium { color: #ffc107; font-weight: bold; }
+            .quality-low { color: #dc3545; font-weight: bold; }
+            """
+        ) as self.interface:
+
+            gr.Markdown("# 🎯 Conversation Analysis Dashboard")
+            gr.Markdown("Analyze customer conversations with AI-powered insights, summaries, and follow-up emails.")
+
+            # Filters
+            with gr.Row():
+                with gr.Column(scale=2):
+                    quality_range = gr.Slider(
+                        minimum=0.0, maximum=1.0, value=[0.0, 1.0],
+                        label="Quality Score Range", step=0.01
+                    )
+                with gr.Column(scale=1):
+                    sentiment_filter = gr.Dropdown(
+                        choices=["All", "Positive", "Negative", "Neutral", "Confused"],
+                        value="All", label="Sentiment Filter"
+                    )
+                with gr.Column(scale=1):
+                    search_text = gr.Textbox(
+                        placeholder="Search conversations...", label="Search"
+                    )
+                with gr.Column(scale=1):
+                    refresh_btn = gr.Button("🔄 Refresh", variant="primary")
+
+            # Main table
+            with gr.Row():
+                conversations_df = gr.Dataframe(
+                    headers=["Conversation ID", "Quality", "Sentiment", "Summary", "Insights Count", "Date"],
+                    datatype=["str", "number", "str", "str", "str", "str"],
+                    interactive=False,
+                    label="Conversations",
+                    wrap=True,  # Enable text wrapping
+                    max_height=600  # Set max height for scrolling
+                )
+
+            # Detail view
+            with gr.Row():
+                with gr.Column():
+                    detail_view = gr.HTML(
+                        value="<p>Select a conversation from the table above to view detailed analysis</p>",
+                        label="Conversation Details"
+                    )
+
+            # Event handlers
+            def refresh_data(quality_range, sentiment, search):
+                if isinstance(quality_range, (list, tuple)) and len(quality_range) == 2:
+                    quality_min, quality_max = quality_range
+                else:
+                    quality_min, quality_max = 0.0, 1.0
+                df = self.load_conversations(quality_min, quality_max, sentiment, search, limit=1000)
+                return df
+
+            def on_table_select(evt: gr.SelectData):
+                if evt.index[0] is not None:
+                    try:
+                        # Get the conversation ID from the selected row
+                        # We need to get the current dataframe from the table
+                        current_df = self.load_conversations()
+                        if not current_df.empty and evt.index[0] < len(current_df):
+                            conversation_id = current_df.iloc[evt.index[0]]['conversation_id']
+                            return self.get_conversation_details(conversation_id)
+                        else:
+                            return "<p>Please refresh the data first</p>"
+                    except Exception as e:
+                        return f"<p>Error: {e}</p>"
+                return "<p>Please select a conversation from the table</p>"
+
+            refresh_btn.click(
+                fn=refresh_data,
+                inputs=[quality_range, sentiment_filter, search_text],
+                outputs=[conversations_df]
+            )
+
+            conversations_df.select(
+                fn=on_table_select,
+                outputs=[detail_view]
+            )
+
+            # Load initial data when the page loads
+            def load_initial_data():
+                return self.load_conversations(limit=1000)  # Load more conversations
+
+            # Set initial data using the interface's load event
+            self.interface.load(load_initial_data, outputs=[conversations_df])
+
+    def launch(self, **kwargs):
+        """Launch the Gradio interface."""
+        self.interface.launch(**kwargs)
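For local testing outside the Space, a minimal sketch (assuming the dependencies from requirements.txt are installed and the repo root is the working directory) is:

```python
# Sketch: running the dashboard locally instead of via conversation_analysis_app.py.
import sys

sys.path.append("src")  # mirror the path handling in conversation_analysis_app.py

from second_brain_online.application.ui.conversation_analysis_ui import ConversationAnalysisUI

ui = ConversationAnalysisUI()  # connects to MongoDB and builds the Gradio Blocks
ui.launch(server_port=7860, share=False)
```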
src/second_brain_online/config.py CHANGED

@@ -44,7 +44,7 @@ class Settings(BaseSettings):
         description="Name of the MongoDB database.",
     )
     MONGODB_COLLECTION_NAME: str = Field(
-        default="
+        default="rag_intercom",
         description="Name of the MongoDB collection for RAG documents.",
     )
     MONGODB_URI: str = Field(