diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1137af1259..d12300ea76 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@ { - ".": "1.97.2" + ".": "1.98.0" }
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml index 2dc4f680a9..e7fb0bdf9b 100644 --- a/.stats.yml +++ b/.stats.yml
@@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-b2a451656ca64d30d174391ebfd94806b4de3ab76dc55b92843cfb7f1a54ecb6.yml -openapi_spec_hash: 27d9691b400f28c17ef063a1374048b0 -config_hash: e822d0c9082c8b312264403949243179 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-721e6ccaa72205ee14c71f8163129920464fb814b95d3df9567a9476bbd9b7fb.yml +openapi_spec_hash: 2115413a21df8b5bf9e4552a74df4312 +config_hash: 9606bb315a193bfd8da0459040143242
diff --git a/CHANGELOG.md b/CHANGELOG.md index 945e224cf9..669d5a5792 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md
@@ -1,5 +1,13 @@ # Changelog +## 1.98.0 (2025-07-30) + +Full Changelog: [v1.97.2...v1.98.0](https://github.com/openai/openai-python/compare/v1.97.2...v1.98.0) + +### Features + +* **api:** manual updates ([88a8036](https://github.com/openai/openai-python/commit/88a8036c5ea186f36c57029ef4501a0833596f56)) + ## 1.97.2 (2025-07-30) Full Changelog: [v1.97.1...v1.97.2](https://github.com/openai/openai-python/compare/v1.97.1...v1.97.2)
diff --git a/pyproject.toml b/pyproject.toml index 5b59053d02..6765611fc2 100644 --- a/pyproject.toml +++ b/pyproject.toml
@@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.97.2" +version = "1.98.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py index 59fb46ac23..ca890665bc 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py
@@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.97.2" # x-release-please-version +__version__ = "1.98.0" # x-release-please-version
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 739aa662d4..c851851418 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py
@@ -248,8 +248,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -388,6 +390,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+ reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -406,6 +412,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -481,9 +493,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the
@@ -520,8 +534,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -668,6 +684,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -686,6 +706,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta.
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -752,9 +778,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the
@@ -791,8 +819,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -939,6 +969,10 @@ def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -957,6 +991,12 @@ def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -1023,9 +1063,11 @@ def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users.
Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the
@@ -1061,8 +1103,10 @@ def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -1104,8 +1148,10 @@ def create( "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop,
@@ -1615,8 +1661,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -1755,6 +1803,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -1773,6 +1825,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -1848,9 +1906,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users.
Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response. Learn more about the
@@ -1887,8 +1947,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -2035,6 +2097,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -2053,6 +2119,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -2119,9 +2191,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response.
Learn more about the
@@ -2158,8 +2232,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -2306,6 +2382,10 @@ async def create( whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning_effort: **o-series models only** Constrains effort on reasoning for
@@ -2324,6 +2404,12 @@ async def create( ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + seed: This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you
@@ -2390,9 +2476,11 @@ async def create( We generally recommend altering this or `temperature` but not both. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). web_search_options: This tool searches the web for relevant results to use in a response.
Learn more about the
@@ -2428,8 +2516,10 @@ async def create( parallel_tool_calls: bool | NotGiven = NOT_GIVEN, prediction: Optional[ChatCompletionPredictionContentParam] | NotGiven = NOT_GIVEN, presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN, response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -2471,8 +2561,10 @@ "parallel_tool_calls": parallel_tool_calls, "prediction": prediction, "presence_penalty": presence_penalty, + "prompt_cache_key": prompt_cache_key, "reasoning_effort": reasoning_effort, "response_format": response_format, + "safety_identifier": safety_identifier, "seed": seed, "service_tier": service_tier, "stop": stop,
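Note: the chat-completions hunks above thread `prompt_cache_key` and `safety_identifier` through every `create()` overload and into the request body. A minimal usage sketch of the two new parameters, assuming an API key in the environment; the model name, cache key, and hashed e-mail are illustrative, not taken from this diff:

```python
import hashlib

from openai import OpenAI

client = OpenAI()

# Per the docstrings above: hash the username or email so no
# identifying information is sent with the safety identifier.
safety_id = hashlib.sha256(b"user@example.com").hexdigest()

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model choice
    messages=[{"role": "user", "content": "Hello!"}],
    prompt_cache_key="my-app-chat-v1",  # groups similar requests for cache hits
    safety_identifier=safety_id,  # stable, non-identifying per-user ID
)
print(completion.choices[0].message.content)
```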
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index fe99aa851d..8de46dbab8 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py
@@ -87,7 +87,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -188,11 +190,21 @@ def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier
@@ -267,9 +279,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers
@@ -297,7 +311,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -404,11 +420,21 @@ def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier
@@ -476,9 +502,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers
@@ -506,7 +534,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -613,11 +643,21 @@ def create( prompt: Reference to a prompt template and its variables.
[Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier
@@ -685,9 +725,11 @@ def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers
@@ -713,7 +755,9 @@ def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -747,7 +791,9 @@ "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream,
@@ -1352,7 +1398,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
@@ -1453,11 +1501,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier
@@ -1532,9 +1590,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers
@@ -1562,7 +1622,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1669,11 +1731,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
@@ -1741,9 +1813,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). extra_headers: Send extra headers
@@ -1771,7 +1845,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, temperature: Optional[float] | NotGiven = NOT_GIVEN,
@@ -1878,11 +1954,21 @@ async def create( prompt: Reference to a prompt template and its variables. [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). + prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + reasoning: **o-series models only** Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). + safety_identifier: A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + service_tier: Specifies the processing type used for serving the request. - If set to 'auto', then the request will be processed with the service tier
@@ -1950,9 +2036,11 @@ async def create( - `disabled` (default): If a model response will exceed the context window size for a model, the request will fail with a 400 error. - user: A stable identifier for your end-users. Used to boost cache hit rates by better - bucketing similar requests and to help OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use + `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
extra_headers: Send extra headers
@@ -1978,7 +2066,9 @@ async def create( parallel_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN, previous_response_id: Optional[str] | NotGiven = NOT_GIVEN, prompt: Optional[ResponsePromptParam] | NotGiven = NOT_GIVEN, + prompt_cache_key: str | NotGiven = NOT_GIVEN, reasoning: Optional[Reasoning] | NotGiven = NOT_GIVEN, + safety_identifier: str | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | NotGiven = NOT_GIVEN, store: Optional[bool] | NotGiven = NOT_GIVEN, stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
@@ -2012,7 +2102,9 @@ "parallel_tool_calls": parallel_tool_calls, "previous_response_id": previous_response_id, "prompt": prompt, + "prompt_cache_key": prompt_cache_key, "reasoning": reasoning, + "safety_identifier": safety_identifier, "service_tier": service_tier, "store": store, "stream": stream,
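Note: the same pair of parameters lands on `Responses.create()` (sync and async overloads plus the request body, per the hunks above). A sketch under the same assumptions as the earlier example; identifier values are illustrative:

```python
from openai import OpenAI

client = OpenAI()

response = client.responses.create(
    model="gpt-4o",  # illustrative model choice
    input="Write one sentence about prompt caching.",
    prompt_cache_key="my-app-responses-v1",  # illustrative cache bucket
    safety_identifier="sha256-hash-of-user-email",  # illustrative value
)
print(response.output_text)
```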
diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index 0945bcad11..dc26198567 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py
@@ -28,7 +28,9 @@ from .chat_completion_store_message import ChatCompletionStoreMessage as ChatCompletionStoreMessage from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort +from .chat_completion_content_part_text import ChatCompletionContentPartText as ChatCompletionContentPartText from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall +from .chat_completion_content_part_image import ChatCompletionContentPartImage as ChatCompletionContentPartImage from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
diff --git a/src/openai/types/chat/chat_completion_content_part_image.py b/src/openai/types/chat/chat_completion_content_part_image.py new file mode 100644 index 0000000000..c1386b9dd3 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_image.py
@@ -0,0 +1,27 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartImage", "ImageURL"] + + +class ImageURL(BaseModel): + url: str + """Either a URL of the image or the base64 encoded image data.""" + + detail: Optional[Literal["auto", "low", "high"]] = None + """Specifies the detail level of the image. + + Learn more in the + [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding). + """ + + +class ChatCompletionContentPartImage(BaseModel): + image_url: ImageURL + + type: Literal["image_url"] + """The type of the content part."""
diff --git a/src/openai/types/chat/chat_completion_content_part_text.py b/src/openai/types/chat/chat_completion_content_part_text.py new file mode 100644 index 0000000000..f09f35f708 --- /dev/null +++ b/src/openai/types/chat/chat_completion_content_part_text.py
@@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ChatCompletionContentPartText"] + + +class ChatCompletionContentPartText(BaseModel): + text: str + """The text content.""" + + type: Literal["text"] + """The type of the content part."""
diff --git a/src/openai/types/chat/chat_completion_store_message.py b/src/openai/types/chat/chat_completion_store_message.py index 8dc093f7b8..661342716b 100644 --- a/src/openai/types/chat/chat_completion_store_message.py +++ b/src/openai/types/chat/chat_completion_store_message.py
@@ -1,10 +1,23 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import List, Union, Optional +from typing_extensions import TypeAlias + from .chat_completion_message import ChatCompletionMessage +from .chat_completion_content_part_text import ChatCompletionContentPartText +from .chat_completion_content_part_image import ChatCompletionContentPartImage + +__all__ = ["ChatCompletionStoreMessage", "ChatCompletionStoreMessageContentPart"] -__all__ = ["ChatCompletionStoreMessage"] +ChatCompletionStoreMessageContentPart: TypeAlias = Union[ChatCompletionContentPartText, ChatCompletionContentPartImage] class ChatCompletionStoreMessage(ChatCompletionMessage): id: str """The identifier of the chat message.""" + + content_parts: Optional[List[ChatCompletionStoreMessageContentPart]] = None + """ + If a content parts array was provided, this is an array of `text` and + `image_url` parts. Otherwise, null. + """
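Note: the two new content-part models and the `content_parts` field only surface on stored messages. A sketch of reading them back, assuming the stored-completions `chat.completions.messages.list` endpoint; the completion ID is illustrative:

```python
from openai import OpenAI
from openai.types.chat import (
    ChatCompletionContentPartImage,
    ChatCompletionContentPartText,
)

client = OpenAI()

# `content_parts` is None unless the original request sent a
# content-parts array; otherwise it holds `text` and `image_url` parts.
for message in client.chat.completions.messages.list("chatcmpl-abc123"):
    for part in message.content_parts or []:
        if isinstance(part, ChatCompletionContentPartText):
            print("text part:", part.text)
        elif isinstance(part, ChatCompletionContentPartImage):
            print("image part:", part.image_url.url)
```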
diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 191793c18f..20d7c187f8 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py
@@ -177,6 +177,13 @@ class CompletionCreateParamsBase(TypedDict, total=False): far, increasing the model's likelihood to talk about new topics. """ + prompt_cache_key: str + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning_effort: Optional[ReasoningEffort] """**o-series models only**
@@ -199,6 +206,15 @@ class CompletionCreateParamsBase(TypedDict, total=False): preferred for models that support it. """ + safety_identifier: str + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + seed: Optional[int] """ This feature is in Beta. If specified, our system will make a best effort to
@@ -293,11 +309,12 @@ class CompletionCreateParamsBase(TypedDict, total=False): """ user: str - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ web_search_options: WebSearchOptions
diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index 2af85d03fb..7db466dfe7 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py
@@ -163,6 +163,13 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ + prompt_cache_key: Optional[str] = None + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning: Optional[Reasoning] = None """**o-series models only**
@@ -170,6 +177,15 @@ class Response(BaseModel): Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ + safety_identifier: Optional[str] = None + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = None """Specifies the processing type used for serving the request.
@@ -229,11 +245,12 @@ class Response(BaseModel): """ user: Optional[str] = None - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """ @property
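Note: `Response` now also echoes both fields as nullable attributes, so they can be inspected on a retrieved response; a small sketch with an illustrative response ID:

```python
from openai import OpenAI

client = OpenAI()

resp = client.responses.retrieve("resp_abc123")  # illustrative ID
# Both attributes are Optional[str]: None unless set on the request.
print("cache bucket:", resp.prompt_cache_key)
print("safety identifier:", resp.safety_identifier)
```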
diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index 08feefd081..4a78d7c028 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py
@@ -123,6 +123,13 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts). """ + prompt_cache_key: str + """ + Used by OpenAI to cache responses for similar requests to optimize your cache + hit rates. Replaces the `user` field. + [Learn more](https://platform.openai.com/docs/guides/prompt-caching). + """ + reasoning: Optional[Reasoning] """**o-series models only**
@@ -130,6 +137,15 @@ class ResponseCreateParamsBase(TypedDict, total=False): Configuration options for [reasoning models](https://platform.openai.com/docs/guides/reasoning). """ + safety_identifier: str + """ + A stable identifier used to help detect users of your application that may be + violating OpenAI's usage policies. The IDs should be a string that uniquely + identifies each user. We recommend hashing their username or email address, in + order to avoid sending us any identifying information. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). + """ + service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] """Specifies the processing type used for serving the request.
@@ -221,11 +237,12 @@ class ResponseCreateParamsBase(TypedDict, total=False): """ user: str - """A stable identifier for your end-users. + """This field is being replaced by `safety_identifier` and `prompt_cache_key`. - Used to boost cache hit rates by better bucketing similar requests and to help - OpenAI detect and prevent abuse. - [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + Use `prompt_cache_key` instead to maintain caching optimizations. A stable + identifier for your end-users. Used to boost cache hit rates by better bucketing + similar requests and to help OpenAI detect and prevent abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers). """
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index aa8f58f0e5..2758d980ed 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py
@@ -72,8 +72,10 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n",
@@ -199,8 +201,10 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n",
@@ -501,8 +505,10 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n",
@@ -628,8 +634,10 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "type": "content", }, presence_penalty=-2, + prompt_cache_key="prompt-cache-key-1234", reasoning_effort="low", response_format={"type": "text"}, + safety_identifier="safety-identifier-1234", seed=-9007199254740991, service_tier="auto", stop="\n",
diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 158654ee70..63e47d8a69 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py
@@ -43,11 +43,13 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, stream=False,
@@ -116,11 +118,13 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, +
safety_identifier="safety-identifier-1234", service_tier="auto", store=True, temperature=1, @@ -380,11 +384,13 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, stream=False, @@ -453,11 +459,13 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "variables": {"foo": "string"}, "version": "version", }, + prompt_cache_key="prompt-cache-key-1234", reasoning={ "effort": "low", "generate_summary": "auto", "summary": "auto", }, + safety_identifier="safety-identifier-1234", service_tier="auto", store=True, temperature=1,