callanwu committed on
Commit
ddfe756
·
verified ·
1 Parent(s): 31682b8

Update llm/oai.py

Browse files
Files changed (1) hide show
  1. llm/oai.py +45 -22
llm/oai.py CHANGED
@@ -135,6 +135,8 @@ class TextChatAtOAI(BaseFnCallModel):
135
  try:
136
  MAX_RETRIES = 5
137
  INITIAL_DELAY = 2
 
 
138
  response = None
139
 
140
  for attempt in range(MAX_RETRIES):
@@ -171,7 +173,6 @@ class TextChatAtOAI(BaseFnCallModel):
171
  ]
172
  if hasattr(choice.delta, 'content') and choice.delta.content:
173
  yield [Message(role=ASSISTANT, content=choice.delta.content, reasoning_content='')]
174
- # 兼容 map agent 模型
175
  if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
176
  function_name = choice.delta.tool_calls[0].function.name
177
  function_call = {
@@ -182,32 +183,54 @@ class TextChatAtOAI(BaseFnCallModel):
182
  yield [Message(role=ASSISTANT, content=f'<tool_call>{function_json}</tool_call>')]
183
  logger.info(f'delta_stream message chunk: {chunk}')
184
  else:
 
185
  full_response = ''
186
  full_reasoning_content = ''
 
 
 
187
  for chunk in response:
188
- if chunk.choices:
189
- choice = chunk.choices[0]
190
- if hasattr(choice.delta, 'reasoning') and choice.delta.reasoning:
191
- full_reasoning_content += choice.delta.reasoning
192
- if hasattr(choice.delta, 'content') and choice.delta.content:
193
- full_response += choice.delta.content
194
- # 兼容 map agent 模型
195
- if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
196
- function_name = choice.delta.tool_calls[0].function.name
197
- # function_call = FunctionCall(
198
- # name=function_name,
199
- # arguments=choice.delta.tool_calls[0].function.arguments,
200
- # )
201
- # yield [Message(role=ASSISTANT, content='', function_call=function_call)]
202
- function_call = {
203
- 'name': function_name,
204
- 'arguments': json.loads(choice.delta.tool_calls[0].function.arguments)
205
- }
206
- function_json = json.dumps(function_call, ensure_ascii=False)
207
- logger.info(json.dumps(function_call, ensure_ascii=False, indent=4))
208
- full_response += f'<tool_call>{function_json}</tool_call>'
 
 
 
 
 
 
 
 
 
 
 
209
  yield [Message(role=ASSISTANT, content=full_response, reasoning_content=full_reasoning_content)]
 
 
 
 
210
  logger.info(f'message chunk: {chunk}')
 
 
 
211
  except OpenAIError as ex:
212
  raise ModelServiceError(exception=ex)
213
 
 
135
  try:
136
  MAX_RETRIES = 5
137
  INITIAL_DELAY = 2
138
+ CONTENT_THRESHOLD = 50
139
+ REASONING_THRESHOLD = 50
140
  response = None
141
 
142
  for attempt in range(MAX_RETRIES):
 
173
  ]
174
  if hasattr(choice.delta, 'content') and choice.delta.content:
175
  yield [Message(role=ASSISTANT, content=choice.delta.content, reasoning_content='')]
 
176
  if hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls:
177
  function_name = choice.delta.tool_calls[0].function.name
178
  function_call = {
 
183
  yield [Message(role=ASSISTANT, content=f'<tool_call>{function_json}</tool_call>')]
184
  logger.info(f'delta_stream message chunk: {chunk}')
185
  else:
186
+
187
  full_response = ''
188
  full_reasoning_content = ''
189
+ content_buffer = ''
190
+ reasoning_content_buffer = ''
191
+
192
  for chunk in response:
193
+ if not chunk.choices:
194
+ continue
195
+
196
+ choice = chunk.choices[0]
197
+ new_content = choice.delta.content if hasattr(choice.delta, 'content') and choice.delta.content else ''
198
+ new_reasoning = choice.delta.reasoning if hasattr(choice.delta, 'reasoning') and choice.delta.reasoning else ''
199
+ has_tool_calls = hasattr(choice.delta, 'tool_calls') and choice.delta.tool_calls
200
+
201
+ if new_reasoning:
202
+ full_reasoning_content += new_reasoning
203
+ reasoning_content_buffer += new_reasoning
204
+
205
+ if new_content:
206
+ full_response += new_content
207
+ content_buffer += new_content
208
+
209
+ if has_tool_calls:
210
+ function_name = choice.delta.tool_calls[0].function.name
211
+ function_call = {
212
+ 'name': function_name,
213
+ 'arguments': json.loads(choice.delta.tool_calls[0].function.arguments)
214
+ }
215
+ function_json = json.dumps(function_call, ensure_ascii=False)
216
+ logger.info(json.dumps(function_call, ensure_ascii=False, indent=4))
217
+ full_response += f'<tool_call>{function_json}</tool_call>'
218
+ content_buffer += '<tool_call>'
219
+
220
+ if (len(content_buffer) >= CONTENT_THRESHOLD or
221
+ len(reasoning_content_buffer) >= REASONING_THRESHOLD or
222
+ '\n' in new_content or
223
+ '\n' in new_reasoning):
224
+
225
  yield [Message(role=ASSISTANT, content=full_response, reasoning_content=full_reasoning_content)]
226
+
227
+ content_buffer = ''
228
+ reasoning_content_buffer = ''
229
+
230
  logger.info(f'message chunk: {chunk}')
231
+
232
+ if content_buffer or reasoning_content_buffer:
233
+ yield [Message(role=ASSISTANT, content=full_response, reasoning_content=full_reasoning_content)]
234
  except OpenAIError as ex:
235
  raise ModelServiceError(exception=ex)
236