Skip to content

Commit 4dfed5c

Browse files
authored
Merge branch 'main' into IMPROVEMENT
2 parents 2ae2d10 + aba88c7 commit 4dfed5c

File tree

6 files changed

+31
-4
lines changed

6 files changed

+31
-4
lines changed

docs/agents.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,7 @@ Agents are generic on their `context` type. Context is a dependency-injection to
3434
```python
3535
@dataclass
3636
class UserContext:
37+
name: str
3738
uid: str
3839
is_pro_user: bool
3940

docs/ja/agents.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,7 @@ agent = Agent(
3838
```python
3939
@dataclass
4040
class UserContext:
41+
name: str
4142
uid: str
4243
is_pro_user: bool
4344

docs/scripts/generate_ref_files.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ def md_target(py_path: Path) -> Path:
3131
rel = py_path.relative_to(SRC_ROOT).with_suffix(".md")
3232
return DOCS_ROOT / rel
3333

34+
3435
def pretty_title(last_segment: str) -> str:
3536
"""
3637
Convert a module/file segment like 'tool_context' to 'Tool Context'.
@@ -39,6 +40,7 @@ def pretty_title(last_segment: str) -> str:
3940
cleaned = last_segment.replace("_", " ").replace("-", " ")
4041
return capwords(cleaned)
4142

43+
4244
# ---- Main ------------------------------------------------------------
4345

4446

src/agents/extensions/models/litellm_model.py

Lines changed: 14 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,14 @@
4545
from ...usage import Usage
4646

4747

48+
class InternalChatCompletionMessage(ChatCompletionMessage):
49+
"""
50+
An internal subclass to carry reasoning_content without modifying the original model.
51+
"""
52+
53+
reasoning_content: str
54+
55+
4856
class LitellmModel(Model):
4957
"""This class enables using any model via LiteLLM. LiteLLM allows you to access OpenAI,
5058
Anthropic, Gemini, Mistral, and many other models.
@@ -364,13 +372,18 @@ def convert_message_to_openai(
364372
provider_specific_fields.get("refusal", None) if provider_specific_fields else None
365373
)
366374

367-
return ChatCompletionMessage(
375+
reasoning_content = ""
376+
if hasattr(message, "reasoning_content") and message.reasoning_content:
377+
reasoning_content = message.reasoning_content
378+
379+
return InternalChatCompletionMessage(
368380
content=message.content,
369381
refusal=refusal,
370382
role="assistant",
371383
annotations=cls.convert_annotations_to_openai(message),
372384
audio=message.get("audio", None), # litellm deletes audio if not present
373385
tool_calls=tool_calls,
386+
reasoning_content=reasoning_content,
374387
)
375388

376389
@classmethod

src/agents/models/chatcmpl_converter.py

Lines changed: 12 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
ResponseOutputRefusal,
3737
ResponseOutputText,
3838
ResponseReasoningItem,
39+
ResponseReasoningItemParam,
3940
)
4041
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
4142
from openai.types.responses.response_reasoning_item import Summary
@@ -210,6 +211,12 @@ def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam
210211
return cast(ResponseOutputMessageParam, item)
211212
return None
212213

214+
@classmethod
215+
def maybe_reasoning_message(cls, item: Any) -> ResponseReasoningItemParam | None:
216+
if isinstance(item, dict) and item.get("type") == "reasoning":
217+
return cast(ResponseReasoningItemParam, item)
218+
return None
219+
213220
@classmethod
214221
def extract_text_content(
215222
cls, content: str | Iterable[ResponseInputContentParam]
@@ -459,7 +466,11 @@ def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
459466
f"Encountered an item_reference, which is not supported: {item_ref}"
460467
)
461468

462-
# 7) If we haven't recognized it => fail or ignore
469+
# 7) reasoning message => not handled
470+
elif cls.maybe_reasoning_message(item):
471+
pass
472+
473+
# 8) If we haven't recognized it => fail or ignore
463474
else:
464475
raise UserError(f"Unhandled item type or structure: {item}")
465476

src/agents/tracing/processors.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -269,8 +269,7 @@ def _run(self):
269269

270270
def _export_batches(self, force: bool = False):
271271
"""Drains the queue and exports in batches. If force=True, export everything.
272-
Otherwise, export up to `max_batch_size` repeatedly until the queue is empty or below a
273-
certain threshold.
272+
Otherwise, export up to `max_batch_size` repeatedly until the queue is completely empty.
274273
"""
275274
while True:
276275
items_to_export: list[Span[Any] | Trace] = []

0 commit comments

Comments
 (0)