tracy04 committed
Commit 2b295a0 · verified · Parent: d7ba616

Update app.py

Files changed (1): app.py (+17, -3)
app.py CHANGED
@@ -1,5 +1,13 @@
 import os
 import gradio as gr
+import logging
+
+# --- Logging config ---
+logging.basicConfig(
+    level=logging.INFO,
+    format="[%(asctime)s] %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
 
 # --- OpenAI (official SDK) ---
 from openai import OpenAI
@@ -15,9 +23,11 @@ DEFAULT_PROVIDER = "openai"  # or "mistral"
 
 def llm_chat(messages, max_tokens, temperature, top_p, provider=DEFAULT_PROVIDER):
     provider = (provider or "").strip().lower()
+    logger.info(f"LLM call: provider={provider}, max_tokens={max_tokens}, temp={temperature}, top_p={top_p}")
+    logger.info(f"Messages sent: {messages}")
 
     if provider == "openai":
-        # OpenAI Chat Completions
+        logger.info("→ Calling OpenAI Chat Completions")
         stream = openai_client.chat.completions.create(
             model="gpt-4o-mini",
             messages=messages,
@@ -29,10 +39,11 @@ def llm_chat(messages, max_tokens, temperature, top_p, provider=DEFAULT_PROVIDER
         for chunk in stream:
             delta = chunk.choices[0].delta
             if delta and delta.content:
+                logger.debug(f"OpenAI yielded token: {delta.content!r}")
                 yield delta.content
 
     elif provider == "mistral":
-        # Mistral Chat Completions
+        logger.info("→ Calling Mistral Chat Completions")
         stream = mistral_client.chat.stream(
             model="mistral-large-latest",
             messages=messages,
@@ -44,10 +55,12 @@ def llm_chat(messages, max_tokens, temperature, top_p, provider=DEFAULT_PROVIDER
             if event.type == "chat.completion.chunk":
                 piece = event.data.delta or ""
                 if piece:
+                    logger.debug(f"Mistral yielded token: {piece!r}")
                     yield piece
         stream.close()
 
     else:
+        logger.error(f"Unknown provider: {provider}")
         yield "[Error] Unknown provider (use 'openai' or 'mistral')."
 
 
@@ -60,11 +73,11 @@ def respond(
     top_p,
     provider,  # "openai" or "mistral"
 ):
-    # Build the expected messages format
     messages = [{"role": "system", "content": system_message}]
     messages.extend(history)
     messages.append({"role": "user", "content": message})
 
+    logger.info(f"New user request: {message}")
     response = ""
     for token in llm_chat(
         messages=messages,
@@ -75,6 +88,7 @@ def respond(
     ):
         response += token
         yield response
+    logger.info(f"Final response generated: {response}")
 
 
 chatbot = gr.ChatInterface(
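
The logging setup added at the top of the file sends records to stderr at INFO level, so the two logger.debug(...) per-token lines stay silent unless the level is lowered. A minimal sketch of the behavior, with DEBUG enabled (the sample timestamps are illustrative):

import logging

logging.basicConfig(
    level=logging.DEBUG,  # DEBUG instead of INFO to also surface the per-token logs
    format="[%(asctime)s] %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

logger.info("LLM call: provider=openai, max_tokens=512, temp=0.7, top_p=0.95")
logger.debug("OpenAI yielded token: 'Hello'")
# Example output:
# [2024-01-01 12:00:00,000] INFO - LLM call: provider=openai, max_tokens=512, temp=0.7, top_p=0.95
# [2024-01-01 12:00:00,001] DEBUG - OpenAI yielded token: 'Hello'

Keeping the token-by-token lines at DEBUG while request metadata stays at INFO is a sensible split: the default output shows one line per call rather than one per streamed chunk.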
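For context, a standalone sketch of the OpenAI streaming call that the new log lines wrap. The client construction and the stream=True flag are assumptions, since the hunks that set them up are truncated in this diff; the chunk-handling loop mirrors app.py:

import os
from openai import OpenAI

# Assumed setup: app.py imports os alongside the SDK, so the key
# presumably comes from the environment.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

stream = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Say hello"}],
    max_tokens=64,
    temperature=0.7,
    top_p=0.95,
    stream=True,  # assumed: required for the chunk loop below, set in a hunk not shown here
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta and delta.content:
        print(delta.content, end="", flush=True)
print()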
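The Mistral branch, consumed the same way as a standalone sketch. The client construction sits outside this diff, so the Mistral(...) setup below is an assumption; the event handling (event.type, event.data.delta, stream.close()) mirrors the code shown in the diff rather than any independently verified SDK shape:

import os
from mistralai import Mistral  # assumed import; app.py's client setup is not in this diff

client = Mistral(api_key=os.environ["MISTRAL_API_KEY"])

stream = client.chat.stream(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "Say hello"}],
)
# Event handling mirrors app.py's loop as shown in the diff.
for event in stream:
    if event.type == "chat.completion.chunk":
        piece = event.data.delta or ""
        if piece:
            print(piece, end="", flush=True)
stream.close()
print()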
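
The diff cuts off at chatbot = gr.ChatInterface(, so the wiring below is a hypothetical completion, written as it would appear at the bottom of app.py. respond() takes (system_message, max_tokens, temperature, top_p, provider) after the two positional chat arguments, which Gradio supplies through additional_inputs in order; the widget choices and defaults here are assumptions, not necessarily what app.py uses:

import gradio as gr

# Hypothetical completion: respond() (defined above in app.py) expects five
# extra inputs after (message, history), passed positionally via additional_inputs.
chatbot = gr.ChatInterface(
    respond,
    type="messages",  # history arrives as {"role", "content"} dicts, matching messages.extend(history)
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
        gr.Dropdown(choices=["openai", "mistral"], value="openai", label="Provider"),
    ],
)

if __name__ == "__main__":
    chatbot.launch()

Because respond() yields the accumulated response on every token, ChatInterface re-renders the partial answer as it streams, which is what makes the per-token logger.debug(...) lines line up with what the user sees.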