Upload app.py

app.py (CHANGED)
@@ -12,28 +12,35 @@ logger = logging.getLogger(__name__)
 load_dotenv()


-# --- 2. 安全機構(保険)の実装 (
-
-
 MAX_INPUT_LENGTH = 1000
 MAX_HISTORY_TURNS = 100

-

 # --- 3. APIクライアント初期化 ---
 try:
     TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
     if not TOGETHER_API_KEY:
-        raise ValueError("環境変数 TOGETHER_API_KEY が設定されていません。
-
-    client = OpenAI(
-        api_key=TOGETHER_API_KEY,
-        base_url="https://api.together.xyz/v1",
-    )
-
     LLM_MODEL = "meta-llama/Llama-3.1-70b-chat-hf"
     logger.info(f"Together AIクライアントの初期化が完了しました。モデル: {LLM_MODEL}")
-
 except Exception as e:
     logger.critical(f"アプリケーションの起動に失敗しました: {e}")
     raise
@@ -61,11 +68,9 @@ SYSTEM_PROMPT_MARI = """
 </roleplay_configuration>
 上記のロールプレイ設定に基づき、麻理として応答を開始してください。
 """
-
 sentiment_analyzer = None

 # --- 5. コア機能の関数定義 ---
-
 def get_sentiment_analyzer():
     global sentiment_analyzer
     if sentiment_analyzer is None:
@@ -79,7 +84,7 @@ def get_sentiment_analyzer():
     return sentiment_analyzer

 def call_llm(system_prompt, user_prompt, is_json_output=False):
-
     messages = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt}
@@ -87,11 +92,7 @@ def call_llm(system_prompt, user_prompt, is_json_output=False):
     response_format = {"type": "json_object"} if is_json_output else None
     try:
         chat_completion = client.chat.completions.create(
-            messages=messages,
-            model=LLM_MODEL,
-            temperature=0.8,
-            max_tokens=500,
-            response_format=response_format,
         )
         return chat_completion.choices[0].message.content
     except Exception as e:
@@ -99,19 +100,11 @@ def call_llm(system_prompt, user_prompt, is_json_output=False):
         return None

 def detect_scene_change(history, message):
     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-3:]])
     available_keywords = ", ".join(THEME_URLS.keys())
     system_prompt = "あなたは会話分析のエキスパートです。ユーザーの提案とキャラクターの反応から、シーン(場所)が変更されるか判断し、指定されたキーワードでJSON形式で出力してください。"
-    user_prompt = f"""
-会話履歴:
-{history_text}
-ユーザー: {message}
----
-上記の会話の流れから、キャラクターが場所の移動に合意したかを判断してください。
-合意した場合は、以下のキーワードから最も適切なものを一つ選び {{"scene": "キーワード"}} の形式で出力してください。
-合意していない場合は {{"scene": "none"}} と出力してください。
-キーワード: {available_keywords}
-"""
     response_text = call_llm(system_prompt, user_prompt, is_json_output=True)
     if response_text:
         try:
@@ -125,29 +118,21 @@ def detect_scene_change(history, message):
     return None

 def generate_dialogue(history, message, affection, stage_name, scene_params, instruction=None):
     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-5:]])
-    user_prompt = f"""
-# 現在の状況
-- 現在地: {scene_params.get("theme", "default")}
-- 好感度: {affection} ({stage_name})
-
-# 会話履歴
-{history_text}
----
-# 指示
-{f"【特別指示】{instruction}" if instruction else f"ユーザーの発言「{message}」に応答してください。"}
-
-麻理の応答:"""
     response_text = call_llm(SYSTEM_PROMPT_MARI, user_prompt)
     return response_text if response_text else "(…うまく言葉が出てこない。少し時間を置いてほしい)"

 def get_relationship_stage(affection):
     if affection < 40: return "ステージ1:警戒"
     if affection < 60: return "ステージ2:関心"
     if affection < 80: return "ステージ3:信頼"
     return "ステージ4:親密"

 def update_affection(message, affection):
     analyzer = get_sentiment_analyzer()
     if not analyzer: return affection
     try:
@@ -157,72 +142,80 @@ def update_affection(message, affection):
     except Exception: pass
     return affection

-
 # --- 6. Gradio応答関数 ---
-def respond(message, chat_history, affection, history, scene_params):
     try:
         if not message.strip():
-            return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, gr.update()

         if len(message) > MAX_INPUT_LENGTH:
-            logger.warning(f"入力長超過: {len(message)}文字")
             bot_message = f"(…長すぎる。{MAX_INPUT_LENGTH}文字以内で話してくれないか?)"
             chat_history.append((message, bot_message))
-            return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, gr.update()

         if len(history) > MAX_HISTORY_TURNS:
-            logger.error("会話履歴が長すぎます。システム保護のため、会話をリセットします。")
-            history = []
-            chat_history = []
             bot_message = "(…ごめん、少し話が長くなりすぎた。最初からやり直そう)"
             chat_history.append((message, bot_message))
-            return "",

         new_affection = update_affection(message, affection)
         stage_name = get_relationship_stage(new_affection)
         final_scene_params = scene_params.copy()
-
         bot_message = ""
-
-
-
-            logger.info(f"シーンチェンジ実行: {final_scene_params.get('theme')} -> {new_scene_name}")
-            final_scene_params["theme"] = new_scene_name
-            instruction = f"ユーザーと一緒に「{new_scene_name}」に来た。周囲の様子を見て、最初の感想をぶっきらぼうに一言つぶやいてください。"
-            bot_message = generate_dialogue(history, message, new_affection, stage_name, final_scene_params, instruction)
         else:
-

         if not bot_message:
             bot_message = "(…うまく言葉にできない)"

         new_history = history + [(message, bot_message)]
         chat_history.append((message, bot_message))
-
         theme_url = THEME_URLS.get(final_scene_params.get("theme"), THEME_URLS["default"])
         background_html = f'<div class="background-container" style="background-image: url({theme_url});"></div>'

-

     except Exception as e:
         logger.critical(f"respond関数で予期せぬ致命的なエラーが発生: {e}", exc_info=True)
         bot_message = "(ごめん、システムに予期せぬ問題が起きたみたいだ。ページを再読み込みしてくれるか…?)"
         chat_history.append((message, bot_message))
-
-

 # --- 7. Gradio UIの構築 ---
 try:
     with open("style.css", "r", encoding="utf-8") as f:
         custom_css = f.read()
 except FileNotFoundError:
-    logger.warning("style.css
     custom_css = ""

 with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="rose", secondary_hue="pink"), title="麻理チャット") as demo:
     scene_state = gr.State({"theme": "default"})
     affection_state = gr.State(30)
     history_state = gr.State([])

     background_display = gr.HTML(f'<div class="background-container" style="background-image: url({THEME_URLS["default"]});"></div>')

@@ -230,12 +223,7 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="rose", secondar
     gr.Markdown("# 麻理チャット", elem_classes="header")
     with gr.Row():
         with gr.Column(scale=3):
-            chatbot = gr.Chatbot(
-                label="麻理との会話",
-                height=550,
-                elem_classes="chatbot",
-                avatar_images=(None, "https://cdn.pixabay.com/photo/2016/03/31/21/40/bot-1296595_1280.png"),
-            )
             with gr.Row():
                 msg_input = gr.Textbox(placeholder="麻理に話しかけてみましょう...", lines=2, scale=4, container=False)
                 submit_btn = gr.Button("送信", variant="primary", scale=1, min_width=100)
@@ -245,17 +233,27 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="rose", secondar
         affection_gauge = gr.Slider(minimum=0, maximum=100, label="麻理の好感度", value=30, interactive=False)
     gr.Markdown("""<div class='footer'>Background Images & Icons: <a href="https://pixabay.com" target="_blank">Pixabay</a></div>""", elem_classes="footer")

-    outputs
-

-
-

     def initial_load(affection):
         return get_relationship_stage(affection)
     demo.load(initial_load, affection_state, stage_display)

-
 if __name__ == "__main__":
     get_sentiment_analyzer()
     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))

app.py after this commit:

 load_dotenv()


+# --- 2. 安全機構(保険)の実装 (元の形に戻します) ---
+RATE_LIMIT_MAX_REQUESTS = 15
+RATE_LIMIT_IN_SECONDS = 60
 MAX_INPUT_LENGTH = 1000
 MAX_HISTORY_TURNS = 100

+def create_limiter_state():
+    return {"timestamps": [], "is_blocked": False}
+
+def check_limiter(limiter_state):
+    if limiter_state["is_blocked"]: return False
+    now = time.time()
+    limiter_state["timestamps"] = [t for t in limiter_state["timestamps"] if now - t < RATE_LIMIT_IN_SECONDS]
+    if len(limiter_state["timestamps"]) >= RATE_LIMIT_MAX_REQUESTS:
+        logger.error(f"レートリミット超過! API呼び出しをブロックします。")
+        limiter_state["is_blocked"] = True
+        return False
+    limiter_state["timestamps"].append(now)
+    return True
+

 # --- 3. APIクライアント初期化 ---
 try:
     TOGETHER_API_KEY = os.getenv("TOGETHER_API_KEY")
     if not TOGETHER_API_KEY:
+        raise ValueError("環境変数 TOGETHER_API_KEY が設定されていません。")
+    client = OpenAI(api_key=TOGETHER_API_KEY, base_url="https://api.together.xyz/v1")
     LLM_MODEL = "meta-llama/Llama-3.1-70b-chat-hf"
     logger.info(f"Together AIクライアントの初期化が完了しました。モデル: {LLM_MODEL}")
 except Exception as e:
     logger.critical(f"アプリケーションの起動に失敗しました: {e}")
     raise
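The new check_limiter keeps a sliding window of call timestamps and flips is_blocked once RATE_LIMIT_MAX_REQUESTS calls land inside RATE_LIMIT_IN_SECONDS. A quick illustration of that behavior (not part of the commit; it assumes the definitions, constants, and logger above are in scope):

    # Illustrative only: exercise the sliding-window limiter defined above.
    state = create_limiter_state()
    results = [check_limiter(state) for _ in range(RATE_LIMIT_MAX_REQUESTS + 1)]
    print(results.count(True))   # 15: calls inside the window are allowed
    print(state["is_blocked"])   # True: the 16th call exceeded the limit
    print(check_limiter(state))  # False: once blocked, the state stays blocked

Once tripped, the block persists for the session; the in-character messages in respond ask the user to reload the page, which starts a fresh limiter state.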
...
 </roleplay_configuration>
 上記のロールプレイ設定に基づき、麻理として応答を開始してください。
 """
 sentiment_analyzer = None

 # --- 5. コア機能の関数定義 ---
 def get_sentiment_analyzer():
     global sentiment_analyzer
     if sentiment_analyzer is None:
...
     return sentiment_analyzer

 def call_llm(system_prompt, user_prompt, is_json_output=False):
+    # ... (この関数の中身は変更なし)
     messages = [
         {"role": "system", "content": system_prompt},
         {"role": "user", "content": user_prompt}
...
     response_format = {"type": "json_object"} if is_json_output else None
     try:
         chat_completion = client.chat.completions.create(
+            messages=messages, model=LLM_MODEL, temperature=0.8, max_tokens=500, response_format=response_format
         )
         return chat_completion.choices[0].message.content
     except Exception as e:
...
         return None

 def detect_scene_change(history, message):
+    # ... (この関数の中身は変更なし)
     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-3:]])
     available_keywords = ", ".join(THEME_URLS.keys())
     system_prompt = "あなたは会話分析のエキスパートです。ユーザーの提案とキャラクターの反応から、シーン(場所)が変更されるか判断し、指定されたキーワードでJSON形式で出力してください。"
+    user_prompt = f'会話履歴:\n{history_text}\nユーザー: {message}\n---\n上記の会話の流れから、キャラクターが場所の移動に合意したかを判断してください。\n合意した場合は、以下のキーワードから最も適切なものを一つ選び {{"scene": "キーワード"}} の形式で出力してください。\n合意していない場合は {{"scene": "none"}} と出力してください。\nキーワード: {available_keywords}'
     response_text = call_llm(system_prompt, user_prompt, is_json_output=True)
     if response_text:
         try:
...
     return None

 def generate_dialogue(history, message, affection, stage_name, scene_params, instruction=None):
+    # ... (この関数の中身は変更なし)
     history_text = "\n".join([f"ユーザー: {u}\n麻理: {m}" for u, m in history[-5:]])
+    user_prompt = f'# 現在の状況\n- 現在地: {scene_params.get("theme", "default")}\n- 好感度: {affection} ({stage_name})\n\n# 会話履歴\n{history_text}\n---\n# 指示\n{f"【特別指示】{instruction}" if instruction else f"ユーザーの発言「{message}」に応答してください。"}\n\n麻理の応答:'
     response_text = call_llm(SYSTEM_PROMPT_MARI, user_prompt)
     return response_text if response_text else "(…うまく言葉が出てこない。少し時間を置いてほしい)"

 def get_relationship_stage(affection):
+    # ... (この関数の中身は変更なし)
     if affection < 40: return "ステージ1:警戒"
     if affection < 60: return "ステージ2:関心"
     if affection < 80: return "ステージ3:信頼"
     return "ステージ4:親密"

 def update_affection(message, affection):
+    # ... (この関数の中身は変更なし)
     analyzer = get_sentiment_analyzer()
     if not analyzer: return affection
     try:
...
     except Exception: pass
     return affection

 # --- 6. Gradio応答関数 ---
+def respond(message, chat_history, affection, history, scene_params, limiter_state):
     try:
+        if limiter_state["is_blocked"]:
+            bot_message = "(…少し混乱している。時間をおいてから、ページを再読み込みして試してくれないか?)"
+            chat_history.append((message, bot_message))
+            return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()
+
         if not message.strip():
+            return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()

         if len(message) > MAX_INPUT_LENGTH:
             bot_message = f"(…長すぎる。{MAX_INPUT_LENGTH}文字以内で話してくれないか?)"
             chat_history.append((message, bot_message))
+            return "", chat_history, affection, get_relationship_stage(affection), affection, history, scene_params, limiter_state, gr.update()

         if len(history) > MAX_HISTORY_TURNS:
             bot_message = "(…ごめん、少し話が長くなりすぎた。最初からやり直そう)"
             chat_history.append((message, bot_message))
+            return "", [], affection, get_relationship_stage(affection), [], scene_params, limiter_state, gr.update()

         new_affection = update_affection(message, affection)
         stage_name = get_relationship_stage(new_affection)
         final_scene_params = scene_params.copy()
         bot_message = ""
+
+        if not check_limiter(limiter_state):
+            bot_message = "(…少し話すのが速すぎる。もう少し、ゆっくり話してくれないか?)"
         else:
+            new_scene_name = detect_scene_change(history, message)
+            if new_scene_name and new_scene_name != final_scene_params.get("theme"):
+                if not check_limiter(limiter_state):
+                    bot_message = "(…少し考える時間がほしい)"
+                else:
+                    final_scene_params["theme"] = new_scene_name
+                    instruction = f"ユーザーと一緒に「{new_scene_name}」に来た。周囲の様子を見て、最初の感想をぶっきらぼうに一言つぶやいてください。"
+                    bot_message = generate_dialogue(history, message, new_affection, stage_name, final_scene_params, instruction)
+            else:
+                if not check_limiter(limiter_state):
+                    bot_message = "(…少し考える時間がほしい)"
+                else:
+                    bot_message = generate_dialogue(history, message, new_affection, stage_name, final_scene_params)

         if not bot_message:
             bot_message = "(…うまく言葉にできない)"

         new_history = history + [(message, bot_message)]
         chat_history.append((message, bot_message))
         theme_url = THEME_URLS.get(final_scene_params.get("theme"), THEME_URLS["default"])
         background_html = f'<div class="background-container" style="background-image: url({theme_url});"></div>'

+        # 【修正点】戻り値の冗長性をなくす
+        return "", chat_history, new_affection, stage_name, new_history, final_scene_params, limiter_state, background_html

     except Exception as e:
         logger.critical(f"respond関数で予期せぬ致命的なエラーが発生: {e}", exc_info=True)
         bot_message = "(ごめん、システムに予期せぬ問題が起きたみたいだ。ページを再読み込みしてくれるか…?)"
         chat_history.append((message, bot_message))
+        limiter_state["is_blocked"] = True
+        return "", chat_history, affection, get_relationship_stage(affection), history, scene_params, limiter_state, gr.update()

 # --- 7. Gradio UIの構築 ---
 try:
     with open("style.css", "r", encoding="utf-8") as f:
         custom_css = f.read()
 except FileNotFoundError:
+    logger.warning("style.cssが見つかりません。")
     custom_css = ""

 with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="rose", secondary_hue="pink"), title="麻理チャット") as demo:
     scene_state = gr.State({"theme": "default"})
     affection_state = gr.State(30)
     history_state = gr.State([])
+    limiter_state = gr.State(create_limiter_state())

     background_display = gr.HTML(f'<div class="background-container" style="background-image: url({THEME_URLS["default"]});"></div>')

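On every early-exit path, respond returns gr.update() in the slot that maps to the background HTML component, which tells Gradio to leave that component unchanged; only the normal path returns the freshly built background_html. A minimal sketch of that pattern outside this app (hypothetical component names, same Gradio Blocks style as used here):

    import gradio as gr

    def handler(text):
        if not text.strip():
            return "", gr.update()        # early exit: clear the textbox, keep the HTML as it is
        return "", f"<div>{text}</div>"   # normal path: clear the textbox, replace the HTML value

    with gr.Blocks() as sketch:
        box = gr.Textbox()
        panel = gr.HTML("<div>initial</div>")
        gr.Button("go").click(handler, box, [box, panel])
    # sketch.launch()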
...
     gr.Markdown("# 麻理チャット", elem_classes="header")
     with gr.Row():
         with gr.Column(scale=3):
+            chatbot = gr.Chatbot(label="麻理との会話", height=550, elem_classes="chatbot", avatar_images=(None, "https://cdn.pixabay.com/photo/2016/03/31/21/40/bot-1296595_1280.png"))
             with gr.Row():
                 msg_input = gr.Textbox(placeholder="麻理に話しかけてみましょう...", lines=2, scale=4, container=False)
                 submit_btn = gr.Button("送信", variant="primary", scale=1, min_width=100)
...
         affection_gauge = gr.Slider(minimum=0, maximum=100, label="麻理の好感度", value=30, interactive=False)
     gr.Markdown("""<div class='footer'>Background Images & Icons: <a href="https://pixabay.com" target="_blank">Pixabay</a></div>""", elem_classes="footer")

+    # 【修正点】outputsリストから affection_state を削除
+    outputs = [
+        msg_input, chatbot, affection_gauge, stage_display,
+        history_state, scene_state, limiter_state, background_display
+    ]
+    # 【修正点】inputsリストに affection_state を追加
+    inputs = [
+        msg_input, chatbot, affection_state, history_state, scene_state, limiter_state
+    ]

+    # 【修正点】affection_stateを更新するための新しいイベントハンドラを追加
+    def update_affection_state(new_affection):
+        return new_affection
+
+    submit_btn.click(respond, inputs, outputs).then(update_affection_state, outputs[2], affection_state)
+    msg_input.submit(respond, inputs, outputs).then(update_affection_state, outputs[2], affection_state)

     def initial_load(affection):
         return get_relationship_stage(affection)
     demo.load(initial_load, affection_state, stage_display)

 if __name__ == "__main__":
     get_sentiment_analyzer()
     demo.launch(server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)))
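The new wiring sends respond's outputs to the visible components first, then a chained .then() handler copies the gauge value (outputs[2]) back into affection_state, so the session state follows what the slider shows while staying out of respond's outputs list, as the 【修正点】 comments note. A stripped-down sketch of the same .click(...).then(...) pattern (hypothetical names, a counter instead of the affection score):

    import gradio as gr

    def step(msg, count):
        # first handler: compute a new value and show it on the slider
        return "", min(100, count + len(msg))

    def write_back(shown):
        # second handler: copy the displayed value back into the session state
        return shown

    with gr.Blocks() as sketch:
        count_state = gr.State(0)
        box = gr.Textbox()
        gauge = gr.Slider(0, 100, value=0, interactive=False)
        send = gr.Button("send")
        send.click(step, [box, count_state], [box, gauge]).then(write_back, gauge, count_state)
    # sketch.launch()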