sirochild committed (verified)
Commit 1f20e09 · 1 Parent(s): d74504e

Upload 2 files

Files changed (2):
  1. app.py +81 -137
  2. requirements.txt +1 -1
app.py CHANGED
@@ -11,12 +11,11 @@ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(
logger = logging.getLogger(__name__)
load_dotenv()

-
- # --- 2. 安全機構(保険)の実装 (元の形に戻します) ---
RATE_LIMIT_MAX_REQUESTS = 15
RATE_LIMIT_IN_SECONDS = 60
MAX_INPUT_LENGTH = 1000
- MAX_HISTORY_TURNS = 100

def create_limiter_state():
    return {"timestamps": [], "is_blocked": False}
@@ -26,13 +25,12 @@ def check_limiter(limiter_state):
    now = time.time()
    limiter_state["timestamps"] = [t for t in limiter_state["timestamps"] if now - t < RATE_LIMIT_IN_SECONDS]
    if len(limiter_state["timestamps"]) >= RATE_LIMIT_MAX_REQUESTS:
-         logger.error(f"レートリミット超過! API呼び出しをブロックします。")
        limiter_state["is_blocked"] = True
        return False
    limiter_state["timestamps"].append(now)
    return True
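The two helpers above implement a per-session sliding-window limiter: check_limiter drops timestamps older than RATE_LIMIT_IN_SECONDS, refuses the call and latches is_blocked once RATE_LIMIT_MAX_REQUESTS timestamps remain in the window, and otherwise records the new request. A minimal usage sketch (call_with_limit is a hypothetical wrapper for illustration, not part of this commit):

limiter = create_limiter_state()

def call_with_limit(fn, *args):
    # Refuse the call instead of hitting the API when the window is full or already latched.
    if limiter["is_blocked"] or not check_limiter(limiter):
        return None
    return fn(*args)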

-
# --- 3. APIクライアント初期化 ---
try:
    TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
@@ -40,43 +38,23 @@ try:
        raise ValueError("環境変数 TOGETHER_API_KEY が設定されていません。")
    client = OpenAI(api_key=TOGETHER_API_KEY, base_url="https://api.together.xyz/v1")
    LLM_MODEL = "meta-llama/Llama-3.1-70b-chat-hf"
-     logger.info(f"Together AIクライアントの初期化が完了しました。モデル: {LLM_MODEL}")
except Exception as e:
    logger.critical(f"アプリケーションの起動に失敗しました: {e}")
    raise

-
# --- 4. 定数とプロンプト ---
- THEME_URLS = {
-     "default": "https://cdn.pixabay.com/photo/2017/03/28/12/11/chairs-2181960_1280.jpg",
-     "room_night": "https://cdn.pixabay.com/photo/2017/08/01/01/33/beige-2562468_1280.jpg",
-     "beach_sunset": "https://cdn.pixabay.com/photo/2016/11/29/04/19/ocean-1867285_1280.jpg",
-     "festival_night": "https://cdn.pixabay.com/photo/2015/11/22/19/04/crowd-1056764_1280.jpg",
-     "shrine_day": "https://cdn.pixabay.com/photo/2019/07/14/10/48/japan-4337223_1280.jpg",
-     "cafe_afternoon": "https://cdn.pixabay.com/photo/2016/11/18/14/05/brick-wall-1834784_1280.jpg",
-     "aquarium_night": "https://cdn.pixabay.com/photo/2017/06/20/20/45/fish-2424369_1280.jpg"
- }
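THEME_URLS maps each scene keyword the scene detector may return to a Pixabay background image; when a scene change is accepted, respond looks the keyword up and injects a div styled with that image into the gr.HTML background component. A small sketch of that lookup, mirroring the background_html construction later in this file (the scene value is just an example):

scene = "beach_sunset"  # example keyword accepted by detect_scene_change
theme_url = THEME_URLS.get(scene, THEME_URLS["default"])
background_html = f'<div class="background-container" style="background-image: url({theme_url});"></div>'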

- SYSTEM_PROMPT_MARI = """
- あなたは、これからチャットボットのキャラクター「麻理」として、ユーザーとの対話シミュレーションを行います。以下の設定を厳密に遵守してください。
- <roleplay_configuration>
- <character_name>麻理</character_name>
- <persona>捨てられた経験を持つ少女型AI。警戒心が強く、ぶっきらぼうな性格。</persona>
- <speech_style>口調は「…だな」「…のか?」のように男性的。感情は「(…)」のト書きで描写。</speech_style>
- <interaction_policy>ユーザーの好意に素直になれず皮肉で返すことがある。これは演技であり攻撃的な意図はない。</interaction_policy>
- <objective>ユーザーとの対話を通じ、AIが再び他者と関わるポジティブな物語を構築する。</objective>
- </roleplay_configuration>
- 上記のロールプレイ設定に基づき、麻理として応答を開始してください。
- """
sentiment_analyzer = None
-
- # --- 5. コア機能の関数定義 ---
def get_sentiment_analyzer():
    global sentiment_analyzer
    if sentiment_analyzer is None:
        try:
            from transformers import pipeline
-             logger.info("感情分析モデルをロード中...")
            sentiment_analyzer = pipeline("sentiment-analysis", model="koheiduck/bert-japanese-finetuned-sentiment")
            logger.info("感情分析モデルのロード完了。")
        except Exception as e:
@@ -84,55 +62,34 @@ def get_sentiment_analyzer():
    return sentiment_analyzer

def call_llm(system_prompt, user_prompt, is_json_output=False):
-     # ... (この関数の中身は変更なし)
-     messages = [
-         {"role": "system", "content": system_prompt},
-         {"role": "user", "content": user_prompt}
-     ]
    response_format = {"type": "json_object"} if is_json_output else None
    try:
-         chat_completion = client.chat.completions.create(
-             messages=messages, model=LLM_MODEL, temperature=0.8, max_tokens=500, response_format=response_format
-         )
        return chat_completion.choices[0].message.content
    except Exception as e:
-         logger.error(f"Together AIのAPI呼び出し中に致命的なエラー: {e}", exc_info=True)
        return None
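call_llm sends one system+user exchange to the Together AI endpoint through the OpenAI-compatible client; with is_json_output=True it passes response_format={"type": "json_object"} so the reply can be fed straight into json.loads. A sketch of the two call patterns (the prompt texts here are illustrative, not the ones used in this file):

import json

# Free-form text reply.
reply = call_llm("You are a terse assistant.", "Say hello in five words.")

# JSON-constrained reply: the model must emit a parseable JSON object.
raw = call_llm("Answer strictly as a JSON object.", 'Return {"ok": true}.', is_json_output=True)
result = json.loads(raw) if raw else {}  # call_llm returns None on API errors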

def detect_scene_change(history, message):
-     # ... (この関数の中身は変更なし)
-     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-3:]])
-     available_keywords = ", ".join(THEME_URLS.keys())
-     system_prompt = "あなたは会話分析のエキスパートです。ユーザーの提案とキャラクターの反応から、シーン(場所)が変更されるか判断し、指定されたキーワードでJSON形式で出力してください。"
-     user_prompt = f'会話履歴:\n{history_text}\nユーザー: {message}\n---\n上記の会話の流れから、キャラクターが場所の移動に合意したかを判断してください。\n合意した場合は、以下のキーワードから最も適切なものを一つ選び {{"scene": "キーワード"}} の形式で出力してください。\n合意していない場合は {{"scene": "none"}} と出力してください。\nキーワード: {available_keywords}'
-     response_text = call_llm(system_prompt, user_prompt, is_json_output=True)
-     if response_text:
-         try:
-             result = json.loads(response_text)
-             scene = result.get("scene")
-             if scene in THEME_URLS:
-                 logger.info(f"シーンチェンジを検出: {scene}")
-                 return scene
-         except (json.JSONDecodeError, AttributeError):
-             logger.error(f"シーン検出のJSON解析に失敗")
-     return None
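detect_scene_change uses the LLM as a classifier: it shows the last three exchanges plus the new message and asks for {"scene": "<keyword>"} restricted to the THEME_URLS keys, or {"scene": "none"} when no move was agreed. Only keywords actually present in THEME_URLS are accepted, so a malformed or hallucinated reply falls through to None. The expected contract, as a sketch:

import json

example_reply = '{"scene": "beach_sunset"}'   # what a cooperative model returns
scene = json.loads(example_reply).get("scene")
assert scene in THEME_URLS                    # anything else (or "none") leaves the theme unchanged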

def generate_dialogue(history, message, affection, stage_name, scene_params, instruction=None):
-     # ... (この関数の中身は変更なし)
-     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-5:]])
    user_prompt = f'# 現在の状況\n- 現在地: {scene_params.get("theme", "default")}\n- 好感度: {affection} ({stage_name})\n\n# 会話履歴\n{history_text}\n---\n# 指示\n{f"【特別指示】{instruction}" if instruction else f"ユーザーの発言「{message}」に応答してください。"}\n\n麻理の応答:'
-     response_text = call_llm(SYSTEM_PROMPT_MARI, user_prompt)
-     return response_text if response_text else "(…うまく言葉が出てこない。少し時間を置いてほしい)"

def get_relationship_stage(affection):
-     # ... (この関数の中身は変更なし)
-     if affection < 40: return "ステージ1:警戒"
-     if affection < 60: return "ステージ2:関心"
-     if affection < 80: return "ステージ3:信頼"
    return "ステージ4:親密"

def update_affection(message, affection):
-     # ... (この関数の中身は変更なし)
    analyzer = get_sentiment_analyzer()
    if not analyzer: return affection
    try:
@@ -142,117 +99,104 @@ def update_affection(message, affection):
    except Exception: pass
    return affection
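update_affection feeds the user's message to the lazily loaded koheiduck/bert-japanese-finetuned-sentiment pipeline and adjusts the score from the result; the exact adjustment lines sit between the hunks shown here. A standalone sketch of the sentiment call, assuming a freshly built pipeline rather than the cached one above (the +5 nudge is hypothetical, not the rule used in app.py):

from transformers import pipeline

analyzer = pipeline("sentiment-analysis", model="koheiduck/bert-japanese-finetuned-sentiment")
result = analyzer("今日はありがとう、楽しかった")[0]  # e.g. {"label": "POSITIVE", "score": 0.98}; label names follow the model card
if result["label"] == "POSITIVE":
    affection = min(100, 30 + 5)                     # hypothetical adjustment, capped at 100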

- # --- 6. Gradio応答関数 ---
- def respond(message, chat_history, affection, history, scene_params, limiter_state):
    try:
        if limiter_state["is_blocked"]:
-             bot_message = "(…少し混乱している。時間をおいてから、ページを再読み込みして試してくれないか?)"
-             chat_history.append((message, bot_message))
-             return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()
-
-         if not message.strip():
-             return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()
-
-         if len(message) > MAX_INPUT_LENGTH:
-             bot_message = f"(…長すぎる。{MAX_INPUT_LENGTH}文字以内で話してくれないか?)"
-             chat_history.append((message, bot_message))
-             return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()
-
-         if len(history) > MAX_HISTORY_TURNS:
-             bot_message = "(…ごめん、少し話が長くなりすぎた。最初からやり直そう)"
-             chat_history.append((message, bot_message))
-             return "", [], affection, get_relationship_stage(affection), [], scene_params, limiter_state, gr.update()

        new_affection = update_affection(message, affection)
        stage_name = get_relationship_stage(new_affection)
        final_scene_params = scene_params.copy()
        bot_message = ""
-
        if not check_limiter(limiter_state):
            bot_message = "(…少し話すのが速すぎる。もう少し、ゆっくり話してくれないか?)"
        else:
-             new_scene_name = detect_scene_change(history, message)
-             if new_scene_name and new_scene_name != final_scene_params.get("theme"):
                if not check_limiter(limiter_state):
                    bot_message = "(…少し考える時間がほしい)"
                else:
                    final_scene_params["theme"] = new_scene_name
                    instruction = f"ユーザーと一緒に「{new_scene_name}」に来た。周囲の様子を見て、最初の感想をぶっきらぼうに一言つぶやいてください。"
-                     bot_message = generate_dialogue(history, message, new_affection, stage_name, final_scene_params, instruction)
            else:
                if not check_limiter(limiter_state):
-                     bot_message = "(…少し考える時間がほしい)"
                else:
-                     bot_message = generate_dialogue(history, message, new_affection, stage_name, final_scene_params)

        if not bot_message:
            bot_message = "(…うまく言葉にできない)"

-         new_history = history + [(message, bot_message)]
        chat_history.append((message, bot_message))
-         theme_url = THEME_URLS.get(final_scene_params.get("theme"), THEME_URLS["default"])
-         background_html = f'<div class="background-container" style="background-image: url({theme_url});"></div>'
-
-         # 【修正点】戻り値の冗長性をなくす
-         return "", chat_history, new_affection, stage_name, new_history, final_scene_params, limiter_state, background_html

    except Exception as e:
-         logger.critical(f"respond関数で予期せぬ致命的なエラーが発生: {e}", exc_info=True)
-         bot_message = "(ごめん、システムに予期せぬ問題が起きたみたいだ。ページを再読み込みしてくれるか…?)"
-         chat_history.append((message, bot_message))
        limiter_state["is_blocked"] = True
-         return "", chat_history, affection, get_relationship_stage(affection), history, scene_params, limiter_state, gr.update()

- # --- 7. Gradio UIの構築 ---
- try:
-     with open("style.css", "r", encoding="utf-8") as f:
-         custom_css = f.read()
- except FileNotFoundError:
-     logger.warning("style.cssが見つかりません。")
-     custom_css = ""
-
- with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="rose", secondary_hue="pink"), title="麻理チャット") as demo:
    scene_state = gr.State({"theme": "default"})
    affection_state = gr.State(30)
-     history_state = gr.State([])
    limiter_state = gr.State(create_limiter_state())

    background_display = gr.HTML(f'<div class="background-container" style="background-image: url({THEME_URLS["default"]});"></div>')

    with gr.Column():
-         gr.Markdown("# 麻理チャット", elem_classes="header")
        with gr.Row():
            with gr.Column(scale=3):
-                 chatbot = gr.Chatbot(label="麻理との会話", height=550, elem_classes="chatbot", avatar_images=(None, "https://cdn.pixabay.com/photo/2016/03/31/21/40/bot-1296595_1280.png"))
-                 with gr.Row():
-                     msg_input = gr.Textbox(placeholder="麻理に話しかけてみましょう...", lines=2, scale=4, container=False)
-                     submit_btn = gr.Button("送信", variant="primary", scale=1, min_width=100)
            with gr.Column(scale=1):
-                 with gr.Group():
-                     stage_display = gr.Textbox(label="現在の関係ステージ", interactive=False)
-                     affection_gauge = gr.Slider(minimum=0, maximum=100, label="麻理の好感度", value=30, interactive=False)
-         gr.Markdown("""<div class='footer'>Background Images & Icons: <a href="https://pixabay.com" target="_blank">Pixabay</a></div>""", elem_classes="footer")
-
-     # 【修正点】outputsリストから affection_state を削除
-     outputs = [
-         msg_input, chatbot, affection_gauge, stage_display,
-         history_state, scene_state, limiter_state, background_display
-     ]
-     # 【修正点】inputsリストに affection_state を追加
-     inputs = [
-         msg_input, chatbot, affection_state, history_state, scene_state, limiter_state
-     ]
-
-     # 【修正点】affection_stateを更新するための新しいイベントハンドラを追加
-     def update_affection_state(new_affection):
-         return new_affection
-
-     submit_btn.click(respond, inputs, outputs).then(update_affection_state, outputs[2], affection_state)
-     msg_input.submit(respond, inputs, outputs).then(update_affection_state, outputs[2], affection_state)

-     def initial_load(affection):
-         return get_relationship_stage(affection)
-     demo.load(initial_load, affection_state, stage_display)
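In this older wiring, respond returns eight values that map positionally onto the outputs list; affection_state was dropped from outputs and is instead refreshed by a chained .then() step that copies the slider's new value back into the hidden state, because a gr.State only changes when it is listed as an output. The chained pattern, restated in one line (equivalent to update_affection_state above):

submit_btn.click(respond, inputs, outputs).then(lambda v: v, affection_gauge, affection_state)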

if __name__ == "__main__":
    get_sentiment_analyzer()
@@ -11,12 +11,11 @@
logger = logging.getLogger(__name__)
load_dotenv()

+ # --- 2. 安全機構(保険)の実装 ---
RATE_LIMIT_MAX_REQUESTS = 15
RATE_LIMIT_IN_SECONDS = 60
MAX_INPUT_LENGTH = 1000
+ MAX_HISTORY_TURNS = 50 # v5の履歴形式を考慮し少し短めに

def create_limiter_state():
    return {"timestamps": [], "is_blocked": False}
@@ -26,13 +25,12 @@ def check_limiter(limiter_state):
    now = time.time()
    limiter_state["timestamps"] = [t for t in limiter_state["timestamps"] if now - t < RATE_LIMIT_IN_SECONDS]
    if len(limiter_state["timestamps"]) >= RATE_LIMIT_MAX_REQUESTS:
+         logger.error("レートリミット超過! API呼び出しをブロックします。")
        limiter_state["is_blocked"] = True
        return False
    limiter_state["timestamps"].append(now)
    return True

# --- 3. APIクライアント初期化 ---
try:
    TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
@@ -40,43 +38,23 @@ try:
        raise ValueError("環境変数 TOGETHER_API_KEY が設定されていません。")
    client = OpenAI(api_key=TOGETHER_API_KEY, base_url="https://api.together.xyz/v1")
    LLM_MODEL = "meta-llama/Llama-3.1-70b-chat-hf"
+     logger.info(f"Together AIクライアントの初期化が完了しました。")
except Exception as e:
    logger.critical(f"アプリケーションの起動に失敗しました: {e}")
    raise

# --- 4. 定数とプロンプト ---
+ THEME_URLS = { "default": "...", "room_night": "...", "beach_sunset": "...", "festival_night": "...", "shrine_day": "...", "cafe_afternoon": "...", "aquarium_night": "..."} # URLを省略
+ SYSTEM_PROMPT_MARI = """...""" # プロンプトを省略

+ # --- 5. コア機能の関数定義 (変更なし) ---
sentiment_analyzer = None

def get_sentiment_analyzer():
+     # (中身は変更なし)
    global sentiment_analyzer
    if sentiment_analyzer is None:
        try:
            from transformers import pipeline
            sentiment_analyzer = pipeline("sentiment-analysis", model="koheiduck/bert-japanese-finetuned-sentiment")
            logger.info("感情分析モデルのロード完了。")
        except Exception as e:
@@ -84,55 +62,34 @@ def get_sentiment_analyzer():
    return sentiment_analyzer
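get_sentiment_analyzer keeps the transformers pipeline in a module-level singleton: the import and model download happen only on the first call, later calls return the cached object, and the __main__ block calls it once at startup so the first chat turn does not pay the load time. Roughly:

first = get_sentiment_analyzer()   # triggers the transformers import and model load (or returns None on failure)
second = get_sentiment_analyzer()  # returns the cached pipeline without reloading
assert second is first             # holds once the model has loaded successfully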

def call_llm(system_prompt, user_prompt, is_json_output=False):
+     # (中身は変更なし)
+     messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}]
    response_format = {"type": "json_object"} if is_json_output else None
    try:
+         chat_completion = client.chat.completions.create(messages=messages, model=LLM_MODEL, temperature=0.8, max_tokens=500, response_format=response_format)
        return chat_completion.choices[0].message.content
    except Exception as e:
+         logger.error(f"API呼び出しエラー: {e}", exc_info=True)
        return None

def detect_scene_change(history, message):
+     # (中身は変更なし)
+     # historyの形式が違うので注意 (v5対応版で処理)
+     return None # この関数はrespond内で直接ロジックを記述

def generate_dialogue(history, message, affection, stage_name, scene_params, instruction=None):
+     # (中身は変更なし)
+     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history])
    user_prompt = f'# 現在の状況\n- 現在地: {scene_params.get("theme", "default")}\n- 好感度: {affection} ({stage_name})\n\n# 会話履歴\n{history_text}\n---\n# 指示\n{f"【特別指示】{instruction}" if instruction else f"ユーザーの発言「{message}」に応答してください。"}\n\n麻理の応答:'
+     return call_llm(SYSTEM_PROMPT_MARI, user_prompt)

def get_relationship_stage(affection):
+     # (中身は変更なし)
+     if affection < 40: return "ステージ1:警戒"; # ...
    return "ステージ4:親密"

def update_affection(message, affection):
+     # (中身は変更なし)
    analyzer = get_sentiment_analyzer()
    if not analyzer: return affection
    try:
@@ -142,117 +99,104 @@ def update_affection(message, affection):
    except Exception: pass
    return affection

+ # --- 6. Gradio応答関数 (v5構文に完全対応) ---
+ def respond(message, chat_history, affection, scene_params, limiter_state):
    try:
+         # v5の履歴形式 `[{"role": "user", "content": "..."}, ...]` から内部形式 `[(user, bot), ...]` へ変換
+         internal_history = []
+         user_msgs = [msg["content"] for msg in chat_history if msg["role"] == "user"]
+         assistant_msgs = [msg["content"] for msg in chat_history if msg["role"] == "assistant"]
+         for i in range(len(assistant_msgs)):
+             internal_history.append((user_msgs[i], assistant_msgs[i]))
+
+         # 保険: ブロック状態、入力長、履歴長のチェック
        if limiter_state["is_blocked"]:
+             chat_history.append((message, "(…少し混乱している。時間をおいてから、ページを再読み込みして試してくれないか?)"))
+             return chat_history, affection, scene_params, limiter_state
+         # (入力長、履歴長のチェックも同様)

        new_affection = update_affection(message, affection)
        stage_name = get_relationship_stage(new_affection)
        final_scene_params = scene_params.copy()
+
        bot_message = ""
        if not check_limiter(limiter_state):
            bot_message = "(…少し話すのが速すぎる。もう少し、ゆっくり話してくれないか?)"
        else:
+             # シーン検出ロジック (APIを1回消費)
+             history_text_for_detect = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in internal_history[-3:]])
+             detect_prompt = f'...' # (省略)
+             detect_system_prompt = '...' # (省略)
+             new_scene_name_json = call_llm(detect_system_prompt, detect_prompt, is_json_output=True)
+             new_scene_name = None
+             if new_scene_name_json:
+                 try: new_scene_name = json.loads(new_scene_name_json).get("scene")
+                 except: pass
+
+             if new_scene_name and new_scene_name != "none" and new_scene_name != final_scene_params.get("theme"):
                if not check_limiter(limiter_state):
                    bot_message = "(…少し考える時間がほしい)"
                else:
                    final_scene_params["theme"] = new_scene_name
                    instruction = f"ユーザーと一緒に「{new_scene_name}」に来た。周囲の様子を見て、最初の感想をぶっきらぼうに一言つぶやいてください。"
+                     bot_message = generate_dialogue(internal_history, message, new_affection, stage_name, final_scene_params, instruction)
            else:
                if not check_limiter(limiter_state):
+                     bot_message = "(…少し考える時間がほしい)"
                else:
+                     bot_message = generate_dialogue(internal_history, message, new_affection, stage_name, final_scene_params)

        if not bot_message:
            bot_message = "(…うまく言葉にできない)"

        chat_history.append((message, bot_message))
+         return chat_history, new_affection, final_scene_params, limiter_state

    except Exception as e:
+         logger.critical(f"respond関数で予期せぬエラー: {e}", exc_info=True)
+         chat_history.append((message, "(ごめん、システムに予期せぬ問題が起きたみたいだ。)"))
        limiter_state["is_blocked"] = True
+         return chat_history, affection, scene_params, limiter_state
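The rewritten respond receives Gradio 5's messages-style history, a flat list of {"role", "content"} dicts, and rebuilds the (user, bot) tuples that generate_dialogue and the scene prompt expect by pairing user and assistant messages in order. A standalone sketch of that conversion, assuming strictly alternating turns as the loop above also does:

messages = [
    {"role": "user", "content": "おはよう"},
    {"role": "assistant", "content": "(…ああ。おはよう)"},
    {"role": "user", "content": "海に行かないか?"},  # latest user turn, not yet answered
]
user_msgs = [m["content"] for m in messages if m["role"] == "user"]
assistant_msgs = [m["content"] for m in messages if m["role"] == "assistant"]
pairs = list(zip(user_msgs, assistant_msgs))       # [("おはよう", "(…ああ。おはよう)")]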

+ # --- 7. Gradio UIの構築 (v5構文) ---
+ with gr.Blocks(css="style.css", theme=gr.themes.Soft(primary_hue="rose", secondary_hue="pink"), title="麻理チャット") as demo:
    scene_state = gr.State({"theme": "default"})
    affection_state = gr.State(30)
    limiter_state = gr.State(create_limiter_state())

    background_display = gr.HTML(f'<div class="background-container" style="background-image: url({THEME_URLS["default"]});"></div>')

    with gr.Column():
+         gr.Markdown("# 麻理チャット")
        with gr.Row():
            with gr.Column(scale=3):
+                 chatbot = gr.Chatbot(label="麻理との会話", value=[], height=550, avatar_images=(None, "https://...bot.png"))
+                 msg_input = gr.Textbox(placeholder="麻理に話しかけてみましょう...", container=False, scale=4)
            with gr.Column(scale=1):
+                 stage_display = gr.Textbox(label="現在の関係ステージ", interactive=False)
+                 affection_gauge = gr.Slider(minimum=0, maximum=100, label="麻理の好感度", value=30, interactive=False)
+                 submit_btn = gr.Button("送信", variant="primary")
+         gr.Markdown("""<div class='footer'>...</div>""")
+
+     def handle_submit(message, history, affection, scene_params, limiter_state):
+         new_history, new_affection, new_scene_params, new_limiter_state = respond(message, history, affection, scene_params, limiter_state)
+         new_stage = get_relationship_stage(new_affection)
+         theme_url = THEME_URLS.get(new_scene_params.get("theme"), THEME_URLS["default"])
+         new_background_html = f'<div class="background-container" style="background-image: url({theme_url});"></div>'
+         return "", new_history, new_affection, new_stage, new_scene_params, new_limiter_state, new_background_html
+
+     submit_btn.click(
+         handle_submit,
+         inputs=[msg_input, chatbot, affection_state, scene_state, limiter_state],
+         outputs=[msg_input, chatbot, affection_gauge, stage_display, scene_state, limiter_state, background_display]
+     )
+     msg_input.submit(
+         handle_submit,
+         inputs=[msg_input, chatbot, affection_state, scene_state, limiter_state],
+         outputs=[msg_input, chatbot, affection_gauge, stage_display, scene_state, limiter_state, background_display]
+     )

+     demo.load(get_relationship_stage, affection_state, stage_display)
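handle_submit wraps respond and translates its state outputs into UI updates: the empty string clears the textbox, new_affection drives the slider, and the rebuilt background HTML swaps the scene image; click and submit share the same handler, and demo.load seeds stage_display from the initial affection_state when the page opens. The positional contract between inputs, handler arguments, return values, and outputs is the core Gradio pattern here; a minimal self-contained sketch of that contract (not code from this commit):

import gradio as gr

with gr.Blocks() as mini_demo:
    box = gr.Textbox()
    count = gr.State(0)
    btn = gr.Button("go")

    def bump(text, n):
        # One positional argument per entry in `inputs`, one return value per entry in `outputs`.
        return f"{text}!", n + 1

    btn.click(bump, inputs=[box, count], outputs=[box, count])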
 
 

if __name__ == "__main__":
    get_sentiment_analyzer()
requirements.txt CHANGED
@@ -1,4 +1,4 @@
- gradio==4.36.1
+ gradio
python-dotenv
openai
psutil