Skip to content

release: 1.98.0 #2503

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jul 30, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .release-please-manifest.json
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
{
".": "1.97.2"
".": "1.98.0"
}
6 changes: 3 additions & 3 deletions .stats.yml
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
configured_endpoints: 111
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml
openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0
config_hash: e822d0c9082c8b312264403949243179
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml
openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312
config_hash: 9606bb315a193bfd8da0459040143242
8 changes: 8 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,13 @@
# Changelog

## 1.98.0 (2025-07-30)

Full Changelog: [v1.97.2...v1.98.0](https://github.com/openai/openai-python/compare/v1.97.2...v1.98.0)

### Features

* **api:** manual updates ([88a8036](https://github.com/openai/openai-python/commit/88a8036c5ea186f36c57029ef4501a0833596f56))

## 1.97.2 (2025-07-30)

Full Changelog: [v1.97.1...v1.97.2](https://github.com/openai/openai-python/compare/v1.97.1...v1.97.2)
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[project]
name = "openai"
version = "1.97.2"
version = "1.98.0"
description = "The official Python library for the openai API"
dynamic = ["readme"]
license = "Apache-2.0"
Expand Down
2 changes: 1 addition & 1 deletion src/openai/_version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "openai"
__version__ = "1.97.2" # x-release-please-version
__version__ = "1.98.0" # x-release-please-version
128 changes: 110 additions & 18 deletions src/openai/resources/chat/completions/completions.py

Large diffs are not rendered by default.

128 changes: 110 additions & 18 deletions src/openai/resources/responses/responses.py

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions src/openai/types/chat/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,9 @@
from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage
from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText
from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage
from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
Expand Down
27 changes: 27 additions & 0 deletions src/openai/types/chat/chat_completion_content_part_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import Optional
from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["ChatCompletionContentPartImage", "ImageURL"]


class ImageURL(BaseModel):
    """Image reference for an `image_url` content part: a remote URL or inline base64 data."""

    url: str
    """Either a URL of the image or the base64 encoded image data."""

    detail: Optional[Literal["auto", "low", "high"]] = None
    """Specifies the detail level of the image.

    Defaults to `None` when the server did not specify a detail level.
    Learn more in the
    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
    """


class ChatCompletionContentPartImage(BaseModel):
    """An `image_url` content part of a stored chat message."""

    # Payload holding the image URL (or base64 data) and optional detail level.
    image_url: ImageURL

    type: Literal["image_url"]
    """The type of the content part."""
15 changes: 15 additions & 0 deletions src/openai/types/chat/chat_completion_content_part_text.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing_extensions import Literal

from ..._models import BaseModel

__all__ = ["ChatCompletionContentPartText"]


class ChatCompletionContentPartText(BaseModel):
    """A plain-text content part of a stored chat message."""

    text: str
    """The text content."""

    type: Literal["text"]
    """The type of the content part."""
15 changes: 14 additions & 1 deletion src/openai/types/chat/chat_completion_store_message.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,23 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from typing import List, Union, Optional
from typing_extensions import TypeAlias

from .chat_completion_message import ChatCompletionMessage
from .chat_completion_content_part_text import ChatCompletionContentPartText
from .chat_completion_content_part_image import ChatCompletionContentPartImage

__all__ = ["ChatCompletionStoreMessage", "ChatCompletionStoreMessageContentPart"]

__all__ = ["ChatCompletionStoreMessage"]
ChatCompletionStoreMessageContentPart: TypeAlias = Union[ChatCompletionContentPartText, ChatCompletionContentPartImage]


class ChatCompletionStoreMessage(ChatCompletionMessage):
    """A chat message as persisted in the store, extending `ChatCompletionMessage`
    with its stored identifier and (optionally) the original content parts."""

    id: str
    """The identifier of the chat message."""

    content_parts: Optional[List[ChatCompletionStoreMessageContentPart]] = None
    """
    If a content parts array was provided, this is an array of `text` and
    `image_url` parts. Otherwise, null.
    """
25 changes: 21 additions & 4 deletions src/openai/types/chat/completion_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,13 @@ class CompletionCreateParamsBase(TypedDict, total=False):
far, increasing the model's likelihood to talk about new topics.
"""

prompt_cache_key: str
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""

reasoning_effort: Optional[ReasoningEffort]
"""**o-series models only**
Expand All @@ -199,6 +206,15 @@ class CompletionCreateParamsBase(TypedDict, total=False):
preferred for models that support it.
"""

safety_identifier: str
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""

seed: Optional[int]
"""
This feature is in Beta. If specified, our system will make a best effort to
Expand Down Expand Up @@ -293,11 +309,12 @@ class CompletionCreateParamsBase(TypedDict, total=False):
"""

user: str
"""A stable identifier for your end-users.
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Used to boost cache hit rates by better bucketing similar requests and to help
OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""

web_search_options: WebSearchOptions
Expand Down
25 changes: 21 additions & 4 deletions src/openai/types/responses/response.py
Original file line number Diff line number Diff line change
Expand Up @@ -163,13 +163,29 @@ class Response(BaseModel):
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""

prompt_cache_key: Optional[str] = None
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""

reasoning: Optional[Reasoning] = None
"""**o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
"""

safety_identifier: Optional[str] = None
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""

service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None
"""Specifies the processing type used for serving the request.
Expand Down Expand Up @@ -229,11 +245,12 @@ class Response(BaseModel):
"""

user: Optional[str] = None
"""A stable identifier for your end-users.
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Used to boost cache hit rates by better bucketing similar requests and to help
OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""

@property
Expand Down
25 changes: 21 additions & 4 deletions src/openai/types/responses/response_create_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,13 +123,29 @@ class ResponseCreateParamsBase(TypedDict, total=False):
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
"""

prompt_cache_key: str
"""
Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
"""

reasoning: Optional[Reasoning]
"""**o-series models only**
Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
"""

safety_identifier: str
"""
A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""

service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]]
"""Specifies the processing type used for serving the request.
Expand Down Expand Up @@ -221,11 +237,12 @@ class ResponseCreateParamsBase(TypedDict, total=False):
"""

user: str
"""A stable identifier for your end-users.
"""This field is being replaced by `safety_identifier` and `prompt_cache_key`.
Used to boost cache hit rates by better bucketing similar requests and to help
OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
Use `prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
"""


Expand Down
8 changes: 8 additions & 0 deletions tests/api_resources/chat/test_completions.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
"type": "content",
},
presence_penalty=-2,
prompt_cache_key="prompt-cache-key-1234",
reasoning_effort="low",
response_format={"type": "text"},
safety_identifier="safety-identifier-1234",
seed=-9007199254740991,
service_tier="auto",
stop="\n",
Expand Down Expand Up @@ -199,8 +201,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
"type": "content",
},
presence_penalty=-2,
prompt_cache_key="prompt-cache-key-1234",
reasoning_effort="low",
response_format={"type": "text"},
safety_identifier="safety-identifier-1234",
seed=-9007199254740991,
service_tier="auto",
stop="\n",
Expand Down Expand Up @@ -501,8 +505,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"type": "content",
},
presence_penalty=-2,
prompt_cache_key="prompt-cache-key-1234",
reasoning_effort="low",
response_format={"type": "text"},
safety_identifier="safety-identifier-1234",
seed=-9007199254740991,
service_tier="auto",
stop="\n",
Expand Down Expand Up @@ -628,8 +634,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"type": "content",
},
presence_penalty=-2,
prompt_cache_key="prompt-cache-key-1234",
reasoning_effort="low",
response_format={"type": "text"},
safety_identifier="safety-identifier-1234",
seed=-9007199254740991,
service_tier="auto",
stop="\n",
Expand Down
8 changes: 8 additions & 0 deletions tests/api_resources/test_responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,11 +43,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None:
"variables": {"foo": "string"},
"version": "version",
},
prompt_cache_key="prompt-cache-key-1234",
reasoning={
"effort": "low",
"generate_summary": "auto",
"summary": "auto",
},
safety_identifier="safety-identifier-1234",
service_tier="auto",
store=True,
stream=False,
Expand Down Expand Up @@ -116,11 +118,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None:
"variables": {"foo": "string"},
"version": "version",
},
prompt_cache_key="prompt-cache-key-1234",
reasoning={
"effort": "low",
"generate_summary": "auto",
"summary": "auto",
},
safety_identifier="safety-identifier-1234",
service_tier="auto",
store=True,
temperature=1,
Expand Down Expand Up @@ -380,11 +384,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn
"variables": {"foo": "string"},
"version": "version",
},
prompt_cache_key="prompt-cache-key-1234",
reasoning={
"effort": "low",
"generate_summary": "auto",
"summary": "auto",
},
safety_identifier="safety-identifier-1234",
service_tier="auto",
store=True,
stream=False,
Expand Down Expand Up @@ -453,11 +459,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn
"variables": {"foo": "string"},
"version": "version",
},
prompt_cache_key="prompt-cache-key-1234",
reasoning={
"effort": "low",
"generate_summary": "auto",
"summary": "auto",
},
safety_identifier="safety-identifier-1234",
service_tier="auto",
store=True,
temperature=1,
Expand Down
Loading