Commit: openai key

Files changed:
- .gitignore +2 -2
- .gradio/certificate.pem +31 -0
- Gradio_UI.py +3 -2
- README_LOCAL.md +57 -0
- app.py +16 -8
- debug_test.py +100 -0
- requirements.txt +3 -2
- tests/run_dialog_demo.py +14 -0
- tools/dialog_handler.py +119 -0
- tools/interactive_cli.py +37 -0
.gitignore
CHANGED
@@ -11,12 +11,12 @@ env/
 
 # Distribution / packaging
 build/
-dist
+dist/``
 *.egg-info/
 .eggs/
 
 # Installer logs
-pip-log.txt
+pip-log.txt``
 pip-delete-this-directory.txt
 
 # Unit test / coverage
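Review note: the trailing double backticks committed on the `dist/` and `pip-log.txt` lines look accidental (likely a paste from a markdown snippet). Git matches each .gitignore line literally, so as committed these two patterns no longer match `dist/` or `pip-log.txt`; the likely intent was the plain `dist/` and `pip-log.txt` entries.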
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
Gradio_UI.py
CHANGED
@@ -141,9 +141,10 @@ def stream_to_gradio(
 
     for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
         # Track tokens if model provides them
-        if hasattr(agent.model, "last_input_token_count"):
+        if hasattr(agent.model, "last_input_token_count") and agent.model.last_input_token_count is not None:
             total_input_tokens += agent.model.last_input_token_count
-
+            if agent.model.last_output_token_count is not None:
+                total_output_tokens += agent.model.last_output_token_count
         if isinstance(step_log, ActionStep):
             step_log.input_token_count = agent.model.last_input_token_count
             step_log.output_token_count = agent.model.last_output_token_count
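The guard added here matters because some model backends leave the token counters as `None` when the API response carries no usage data, and `int += None` raises a `TypeError`. A minimal sketch of the same pattern in isolation (the `FakeModel` stand-in is illustrative, not from the repo):

```python
# Sketch: accumulate token usage only when the backend actually reported it.
class FakeModel:  # stand-in for agent.model when no usage data is returned
    last_input_token_count = None
    last_output_token_count = None

model = FakeModel()
total_input_tokens = total_output_tokens = 0
if getattr(model, "last_input_token_count", None) is not None:
    total_input_tokens += model.last_input_token_count
    if model.last_output_token_count is not None:
        total_output_tokens += model.last_output_token_count
```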
README_LOCAL.md
ADDED
@@ -0,0 +1,57 @@
+First Agent Template — Local Run Guide
+
+This file explains how to run the prototype locally (macOS, zsh).
+
+Assumptions
+- Project root: /Users/gautam.kumar/Desktop/agents/First_agent_template
+- You will use the included virtualenv `.venv` or create one.
+
+1) Create & activate a venv (if needed)
+
+```bash
+cd /Users/gautam.kumar/Desktop/agents/First_agent_template
+python3 -m venv .venv
+source .venv/bin/activate
+```
+
+2) Install dependencies
+
+```bash
+pip install --upgrade pip
+pip install -r requirements.txt
+# optional: install gradio UI extras
+pip install "smolagents[gradio]"
+```
+
+3) Smoke tests
+
+```bash
+PYTHONPATH=$(pwd) python tests/run_nlu_test.py
+```
+
+4) Demo run
+
+```bash
+PYTHONPATH=$(pwd) python tests/run_dialog_demo.py
+```
+
+5) Interactive CLI
+
+```bash
+PYTHONPATH=$(pwd) python tools/interactive_cli.py
+```
+
+Interactive CLI tips
+- Type natural sentences like: "I can pay ¥30000 by next Friday"
+- If the assistant asks for missing info ("Can I confirm your full name?"), reply with the requested info.
+- To request a human operator: "I want to talk to an operator tomorrow morning" — the assistant will propose JST slots.
+- Type `exit` or `quit` to leave.
+
+Data & files
+- `data/requests.json` — saved handoff requests (JSON).
+- `data/operator_notifications.log` — operator notification stub logs.
+
+Possible follow-ups:
+- Merge this file into the main `README.md`, or keep it separate.
+- Add unit tests and CI configs.
+- Improve the CLI to track dialogue state across multiple turns more robustly.
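One gap worth noting in this guide: with this commit, `app.py` raises at import time unless `OPENAI_API_KEY` is set (see the app.py diff below), and because `tools/dialog_handler.py` imports from `app`, the demo and CLI steps inherit the same requirement. Export the key in your shell first (`export OPENAI_API_KEY=...`). A quick preflight check, as a sketch:

```python
# Preflight sketch: verify the key is visible to Python before running the demos.
import os

if not os.getenv("OPENAI_API_KEY"):
    raise SystemExit("Set OPENAI_API_KEY first, e.g.: export OPENAI_API_KEY=sk-...")
print("OPENAI_API_KEY is set; app.py's startup check will pass.")
```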
app.py
CHANGED
@@ -1,4 +1,4 @@
-from smolagents import CodeAgent, DuckDuckGoSearchTool, HfApiModel, load_tool, tool
+from smolagents import CodeAgent, DuckDuckGoSearchTool, OpenAIServerModel, load_tool, tool
 import datetime
 import requests
 import pytz

@@ -98,14 +98,22 @@ def get_current_time_in_timezone(timezone: str) -> str:
 
 final_answer = FinalAnswerTool()
 
-# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
-# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
 
-model = HfApiModel(
-max_tokens=2096,
-temperature=0.5,
-model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-custom_role_conversions=None,
+# Get OpenAI API key from environment variable
+openai_api_key = os.getenv('OPENAI_API_KEY')
+if not openai_api_key:
+    raise ValueError("Please set the OPENAI_API_KEY environment variable")
+
+import urllib3
+# Disable SSL verification warnings
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+model = OpenAIServerModel(
+    api_key=openai_api_key,
+    model_id="gpt-5-mini-2025-08-07",  # change model_id here to trade cost against capability
+    max_tokens=2096,
+    # verify_ssl=False,  # Disable SSL verification
+    temperature=0.5,
 )
 
 
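Review note: the new block calls `os.getenv(...)` at module level, but the import hunk above only rewrites the `smolagents` line. Unless `os` is already imported elsewhere in app.py, the module will fail with a `NameError` on startup. A hedged sketch of the top-of-file imports this code needs (assuming `os` was not already imported):

```python
# Imports app.py's new code relies on:
import os       # for os.getenv('OPENAI_API_KEY')
import urllib3  # the commit imports this mid-file; top-of-file placement also works

# Disable SSL verification warnings, as in the committed code
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
```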
debug_test.py
ADDED
@@ -0,0 +1,100 @@
+import os
+import pytz
+from tools.nlu_tool import extract_intent_and_slots
+from tools.scheduler import find_common_slots
+from datetime import datetime
+from smolagents import OpenAIServerModel
+
+def test_timezone_function():
+    """Test the timezone functionality"""
+    try:
+        tz = pytz.timezone('America/New_York')
+        local_time = datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
+        print("Timezone test passed:", local_time)
+    except Exception as e:
+        print("Timezone test failed:", str(e))
+
+def test_nlu():
+    """Test the NLU functionality"""
+    try:
+        # Test with an amount in yen
+        test_text = "I need to transfer ¥30,000"
+        result = extract_intent_and_slots(test_text)
+        print("NLU test result (amount):", result)
+
+        # Test with a date/time
+        test_text_2 = "Schedule for tomorrow at 2pm"
+        result_2 = extract_intent_and_slots(test_text_2)
+        print("NLU test result (schedule):", result_2)
+    except Exception as e:
+        print("NLU test failed:", str(e))
+
+def test_scheduler():
+    """Test the scheduler functionality"""
+    try:
+        # Create proper datetime objects in Asia/Tokyo timezone
+        import dateparser
+        from datetime import datetime, timedelta
+        tz = pytz.timezone('Asia/Tokyo')
+        settings = {'TIMEZONE': 'Asia/Tokyo', 'RETURN_AS_TIMEZONE_AWARE': True}
+
+        tomorrow = datetime.now() + timedelta(days=1)
+        start_str = f"tomorrow 09:00"
+        end_str = f"tomorrow 12:00"
+
+        start = dateparser.parse(start_str, settings=settings)
+        end = dateparser.parse(end_str, settings=settings)
+
+        test_windows = [
+            {'start': start, 'end': end}
+        ]
+        result = find_common_slots(test_windows)
+        print("Scheduler test result:", result)
+    except Exception as e:
+        print("Scheduler test failed:", str(e))
+
+def test_openai_connection():
+    """Test the OpenAI API connection"""
+    try:
+        print("Testing OpenAI API connection...")
+        openai_api_key = os.getenv('OPENAI_API_KEY')
+        if not openai_api_key:
+            print("❌ Error: OPENAI_API_KEY environment variable is not set")
+            return
+
+        model = OpenAIServerModel(
+            api_key=openai_api_key,
+            model_id="gpt-5-mini-2025-08-07",
+            max_tokens=50,
+            temperature=0.5,
+        )
+
+        # Try a simple chat completion to test the connection
+        test_prompt = "Say 'Hello, the API is working!'"
+        try:
+            response = model.generate_text(test_prompt)
+            print("✅ OpenAI API Test Response:", response)
+        except Exception as e:
+            print("❌ API Call Error:", str(e))
+            print("Error details:", str(type(e).__name__))
+
+    except Exception as e:
+        print("❌ OpenAI Setup Error:", str(e))
+
+def main():
+    print("Starting debug tests...")
+
+    print("\n1. Testing OpenAI API connection:")
+    test_openai_connection()
+
+    print("\n2. Testing timezone functionality:")
+    test_timezone_function()
+
+    print("\n3. Testing NLU:")
+    test_nlu()
+
+    print("\n4. Testing scheduler:")
+    test_scheduler()
+
+if __name__ == "__main__":
+    main()
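One caveat: `model.generate_text(...)` may not exist on `OpenAIServerModel`, depending on the installed smolagents version (its model classes are generally invoked with a chat-message list rather than a raw prompt string), in which case the inner try/except reports an `AttributeError` instead of a real connectivity result. If that happens, a version-independent connectivity check using the `openai` package directly is a reasonable fallback (a sketch; the model name is illustrative and assumes `pip install openai`):

```python
# Fallback connectivity check with the openai SDK, bypassing smolagents entirely.
import os
from openai import OpenAI

client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
resp = client.chat.completions.create(
    model="gpt-4o-mini",  # any chat model your key can access
    messages=[{"role": "user", "content": "Say 'Hello, the API is working!'"}],
)
print(resp.choices[0].message.content)
```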
requirements.txt
CHANGED
@@ -1,6 +1,7 @@
 markdownify
-smolagents
+smolagents[gradio]
+# smolagents==1.13.0
 requests
 duckduckgo_search
 pandas
-dateparser
+dateparser
tests/run_dialog_demo.py
ADDED
@@ -0,0 +1,14 @@
+from tools.dialog_handler import handle_message
+
+print('--- Payment commitment flow ---')
+msg = 'I can pay ¥25000 by 2025-10-10'
+out = handle_message(msg)
+print('User:', msg)
+print('Assistant:', out['response'])
+print('Created request:', out['request'])
+
+print('\n--- Request human operator flow ---')
+msg2 = 'I want to talk to an operator tomorrow morning'
+out2 = handle_message(msg2)
+print('User:', msg2)
+print('Assistant:', out2['response'])
tools/dialog_handler.py
ADDED
@@ -0,0 +1,119 @@
+import typing
+from tools import nlu_tool, scheduler, requests_store
+from app import propose_slots, notify_operator
+import yaml
+import os
+
+PROMPTS_PATH = 'prompts.yaml'
+
+def _load_prompts():
+    with open(PROMPTS_PATH, 'r') as f:
+        txt = f.read()
+    # If file contains a fenced code block (```yaml ... ```), strip fences
+    stripped = txt
+    lines = txt.splitlines()
+    if lines and lines[0].strip().startswith("```") and lines[-1].strip().startswith("```"):
+        stripped = "\n".join(lines[1:-1])
+
+    data = yaml.safe_load(stripped)
+
+    # recursively search for collection_assistant
+    def find_collection(obj):
+        if isinstance(obj, dict):
+            if 'collection_assistant' in obj:
+                return obj['collection_assistant']
+            for v in obj.values():
+                found = find_collection(v)
+                if found is not None:
+                    return found
+        elif isinstance(obj, list):
+            for item in obj:
+                found = find_collection(item)
+                if found is not None:
+                    return found
+        return None
+
+    found = find_collection(data)
+    if found is not None:
+        # If the value is a YAML block string (from |-) parse it into a dict
+        if isinstance(found, str):
+            try:
+                inner = yaml.safe_load(found)
+                if isinstance(inner, dict):
+                    return inner
+            except Exception:
+                pass
+        return found
+    raise KeyError('collection_assistant')
+
+
+def handle_message(message: str, context: dict = None) -> dict:
+    """High-level handler that accepts a user message and performs:
+    - NLU (intent + slots)
+    - Minimal slot-filling for payment_commitment
+    - Confirmation and request creation
+    - Scheduling flow for human-operator requests
+
+    Returns a dict with response text and any created request record.
+    """
+    prompts = _load_prompts()
+    nlu = nlu_tool.extract_intent_and_slots(message)
+    intent = nlu.get('intent')
+    slots = nlu.get('slots', {})
+    response = ""
+    created = None
+
+    if intent == 'payment_commitment':
+        # find payment_commitment intent meta
+        slot_meta = next((x for x in prompts.get('intents', []) if x.get('name') == 'payment_commitment'), None)
+        # ensure required slots: customer_name, account_number, amount, date_by_when
+        missing = []
+        for s in ['customer_name', 'account_number', 'amount', 'date_by_when']:
+            if s not in slots or not slots[s]:
+                missing.append(s)
+
+        if missing:
+            # return prompt for the first missing slot (simple)
+            if slot_meta:
+                slot_prompt = next((x.get('prompt') for x in slot_meta.get('slots', []) if x.get('id') == missing[0]), None)
+                response = slot_prompt or f"Please provide {missing[0]}"
+            else:
+                response = f"Please provide {missing[0]}"
+            return {'response': response, 'request': None}
+
+        # All required slots present -> confirm and create request
+        tpl = slot_meta.get('confirmation_template') if slot_meta else "Thank you. I recorded your commitment."
+        response = tpl.format(customer_name=slots.get('customer_name', '[unknown]'), account_number=slots.get('account_number', '[unknown]'), amount=slots.get('amount'), date_by_when=slots.get('date_by_when'))
+
+        handoff_tpl = slot_meta.get('handoff_payload_template') if slot_meta else None
+        if handoff_tpl:
+            handoff_json = handoff_tpl.format(customer_name=slots.get('customer_name', ''), account_number=slots.get('account_number', ''), amount=slots.get('amount', ''), date_by_when=slots.get('date_by_when', ''), contact_preference=slots.get('contact_preference', ''), nlu_confidence=nlu.get('nlu_confidence', 0))
+        else:
+            handoff_json = str({'type': 'payment_commitment', 'slots': slots})
+
+        # persist
+        created = requests_store.create_request({'raw_payload': handoff_json})
+
+        # notify operator stub
+        notify_operator(f"HANDOFF: {handoff_json}")
+
+        return {'response': response, 'request': created}
+
+    if intent == 'request_human_operator':
+        # find intent meta
+        req_meta = next((x for x in prompts.get('intents', []) if x.get('name') == 'request_human_operator'), None)
+        preferred = []
+        if 'preferred_windows' in slots and slots['preferred_windows']:
+            preferred = [{'start': slots['preferred_windows'], 'end': slots['preferred_windows']}]
+        candidates = propose_slots(preferred)
+        if req_meta:
+            response = req_meta.get('propose_slots_template', '').format(slot1_local=candidates[0] if len(candidates) > 0 else '', slot2_local=candidates[1] if len(candidates) > 1 else '', slot3_local=candidates[2] if len(candidates) > 2 else '')
+        else:
+            response = f"I can connect you at: {', '.join(candidates)}"
+        return {'response': response, 'request': None}
+
+    # fallback
+    response = prompts.get('behaviors', {}).get('fallback', {}).get('prompt', "I didn't understand. Can you rephrase?")
+    if not response:
+        response = "I didn't understand. Can you rephrase?"
+    return {'response': response, 'request': None}
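For reference, this is the shape of `prompts.yaml` data that `_load_prompts()` and `handle_message()` appear to assume, reconstructed from the lookups above. This is a hypothetical sketch (the actual prompts.yaml is not part of this commit's diff); all field text is illustrative:

```python
# Hypothetical parsed structure of the 'collection_assistant' section,
# inferred from handle_message()'s .get(...) accesses.
collection_assistant = {
    'intents': [
        {
            'name': 'payment_commitment',
            'slots': [
                {'id': 'customer_name', 'prompt': 'Can I confirm your full name?'},
                {'id': 'account_number', 'prompt': 'What is your account number?'},
            ],
            'confirmation_template': 'Thanks {customer_name}: {amount} by {date_by_when} for account {account_number}.',
            'handoff_payload_template': '{{"name": "{customer_name}", "amount": "{amount}", "by": "{date_by_when}"}}',
        },
        {
            'name': 'request_human_operator',
            'propose_slots_template': 'Available JST slots: {slot1_local}, {slot2_local}, {slot3_local}',
        },
    ],
    'behaviors': {'fallback': {'prompt': "I didn't understand. Can you rephrase?"}},
}
```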
tools/interactive_cli.py
ADDED
@@ -0,0 +1,37 @@
+#!/usr/bin/env python3
+import os
+import sys
+from tools.dialog_handler import handle_message
+from tools import nlu_tool
+
+def prompt_loop():
+    print("Interactive CLI for collection assistant. Type 'exit' to quit.")
+    context = {}
+    while True:
+        msg = input('You: ').strip()
+        if msg.lower() in ('exit', 'quit'):
+            print('Goodbye')
+            break
+
+        # run NLU first to see what is missing
+        nlu = nlu_tool.extract_intent_and_slots(msg)
+        intent = nlu.get('intent')
+        print(f"(NLU -> intent={intent}, slots={nlu.get('slots')})")
+
+        # call handler - it will return either a prompt for missing slot or a confirmation
+        out = handle_message(msg, context=context)
+        print('Assistant:', out['response'])
+
+        # If handler requested a missing slot (no request created), accept next input as value and append to message
+        if out.get('request') is None and 'Please provide' in out['response'] or out['response'].endswith('?'):
+            # ask user to provide the requested info and re-run
+            follow = input('You (reply): ').strip()
+            # naive: append to previous message for NLU re-processing
+            combined = follow
+            out2 = handle_message(combined, context=context)
+            print('Assistant:', out2['response'])
+            if out2.get('request'):
+                print('Created request:', out2['request'])
+
+if __name__ == '__main__':
+    prompt_loop()
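A review note on the follow-up condition in this file: Python binds `and` tighter than `or`, so `a is None and b or c` parses as `(a is None and b) or c`. As committed, the reply branch therefore fires whenever the response ends with `?`, even when a request was already created. The likely intended grouping, as a sketch:

```python
# Likely intended precedence for the missing-slot follow-up check:
needs_followup = out.get('request') is None and (
    'Please provide' in out['response'] or out['response'].endswith('?')
)
if needs_followup:
    follow = input('You (reply): ').strip()
    ...
```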