Add GPT-5 support (#150281)

commit 3d39fb08e5 (parent a1731cd210)
Author: Denis Shulyaka
Committed by: Franck Nijhof
Date: 2025-08-08 21:49:09 +03:00

6 changed files with 77 additions and 19 deletions

homeassistant/components/openai_conversation/config_flow.py

@@ -49,6 +49,7 @@ from .const import (
     CONF_RECOMMENDED,
     CONF_TEMPERATURE,
     CONF_TOP_P,
+    CONF_VERBOSITY,
     CONF_WEB_SEARCH,
     CONF_WEB_SEARCH_CITY,
     CONF_WEB_SEARCH_CONTEXT_SIZE,
@@ -67,6 +68,7 @@ from .const import (
     RECOMMENDED_REASONING_EFFORT,
     RECOMMENDED_TEMPERATURE,
     RECOMMENDED_TOP_P,
+    RECOMMENDED_VERBOSITY,
     RECOMMENDED_WEB_SEARCH,
     RECOMMENDED_WEB_SEARCH_CONTEXT_SIZE,
     RECOMMENDED_WEB_SEARCH_USER_LOCATION,
@@ -323,7 +325,7 @@ class OpenAISubentryFlowHandler(ConfigSubentryFlow):
         model = options[CONF_CHAT_MODEL]
-        if model.startswith("o"):
+        if model.startswith(("o", "gpt-5")):
             step_schema.update(
                 {
                     vol.Optional(
@@ -331,7 +333,9 @@ class OpenAISubentryFlowHandler(ConfigSubentryFlow):
                         default=RECOMMENDED_REASONING_EFFORT,
                     ): SelectSelector(
                         SelectSelectorConfig(
-                            options=["low", "medium", "high"],
+                            options=["low", "medium", "high"]
+                            if model.startswith("o")
+                            else ["minimal", "low", "medium", "high"],
                             translation_key=CONF_REASONING_EFFORT,
                             mode=SelectSelectorMode.DROPDOWN,
                         )
@@ -341,6 +345,24 @@ class OpenAISubentryFlowHandler(ConfigSubentryFlow):
         elif CONF_REASONING_EFFORT in options:
             options.pop(CONF_REASONING_EFFORT)
+        if model.startswith("gpt-5"):
+            step_schema.update(
+                {
+                    vol.Optional(
+                        CONF_VERBOSITY,
+                        default=RECOMMENDED_VERBOSITY,
+                    ): SelectSelector(
+                        SelectSelectorConfig(
+                            options=["low", "medium", "high"],
+                            translation_key=CONF_VERBOSITY,
+                            mode=SelectSelectorMode.DROPDOWN,
+                        )
+                    ),
+                }
+            )
+        elif CONF_VERBOSITY in options:
+            options.pop(CONF_VERBOSITY)
         if self._subentry_type == "conversation" and not model.startswith(
             tuple(UNSUPPORTED_WEB_SEARCH_MODELS)
         ):
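
For reviewers following the schema changes above: after this commit, reasoning models (o-series and GPT-5) get a reasoning_effort selector, with "minimal" offered only for GPT-5, and only GPT-5 models additionally get a verbosity selector. A minimal standalone sketch of that branching (illustrative helper, not part of the commit; it only mirrors the prefix checks visible in the diff):

# Sketch only: which model-specific selectors the options flow exposes,
# mirroring the prefix checks added in this commit (not Home Assistant code).
def model_specific_options(model: str) -> dict[str, list[str]]:
    options: dict[str, list[str]] = {}
    if model.startswith(("o", "gpt-5")):
        # o-series keeps the original three levels; gpt-5 also allows "minimal".
        options["reasoning_effort"] = (
            ["low", "medium", "high"]
            if model.startswith("o")
            else ["minimal", "low", "medium", "high"]
        )
    if model.startswith("gpt-5"):
        options["verbosity"] = ["low", "medium", "high"]
    return options

assert model_specific_options("o3-mini") == {"reasoning_effort": ["low", "medium", "high"]}
assert model_specific_options("gpt-5")["reasoning_effort"] == ["minimal", "low", "medium", "high"]
assert "verbosity" not in model_specific_options("gpt-4o")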

homeassistant/components/openai_conversation/const.py

@@ -21,6 +21,7 @@ CONF_REASONING_EFFORT = "reasoning_effort"
 CONF_RECOMMENDED = "recommended"
 CONF_TEMPERATURE = "temperature"
 CONF_TOP_P = "top_p"
+CONF_VERBOSITY = "verbosity"
 CONF_WEB_SEARCH = "web_search"
 CONF_WEB_SEARCH_USER_LOCATION = "user_location"
 CONF_WEB_SEARCH_CONTEXT_SIZE = "search_context_size"
@@ -34,6 +35,7 @@ RECOMMENDED_MAX_TOKENS = 3000
 RECOMMENDED_REASONING_EFFORT = "low"
 RECOMMENDED_TEMPERATURE = 1.0
 RECOMMENDED_TOP_P = 1.0
+RECOMMENDED_VERBOSITY = "medium"
 RECOMMENDED_WEB_SEARCH = False
 RECOMMENDED_WEB_SEARCH_CONTEXT_SIZE = "medium"
 RECOMMENDED_WEB_SEARCH_USER_LOCATION = False

homeassistant/components/openai_conversation/entity.py

@@ -61,6 +61,7 @@ from .const import (
     CONF_REASONING_EFFORT,
     CONF_TEMPERATURE,
     CONF_TOP_P,
+    CONF_VERBOSITY,
     CONF_WEB_SEARCH,
     CONF_WEB_SEARCH_CITY,
     CONF_WEB_SEARCH_CONTEXT_SIZE,
@@ -75,6 +76,7 @@ from .const import (
     RECOMMENDED_REASONING_EFFORT,
     RECOMMENDED_TEMPERATURE,
     RECOMMENDED_TOP_P,
+    RECOMMENDED_VERBOSITY,
     RECOMMENDED_WEB_SEARCH_CONTEXT_SIZE,
 )
@@ -346,14 +348,18 @@ class OpenAIBaseLLMEntity(Entity):
         if tools:
             model_args["tools"] = tools
-        if model_args["model"].startswith("o"):
+        if model_args["model"].startswith(("o", "gpt-5")):
             model_args["reasoning"] = {
                 "effort": options.get(
                     CONF_REASONING_EFFORT, RECOMMENDED_REASONING_EFFORT
                 )
             }
-        else:
-            model_args["store"] = False
+            model_args["include"] = ["reasoning.encrypted_content"]
+
+        if model_args["model"].startswith("gpt-5"):
+            model_args["text"] = {
+                "verbosity": options.get(CONF_VERBOSITY, RECOMMENDED_VERBOSITY)
+            }

         messages = [
             m
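
For context on the entity change above: requests for o-series and GPT-5 models now carry a reasoning block plus encrypted reasoning content, and GPT-5 models additionally get a text verbosity setting. A rough illustration of the resulting Responses API keyword arguments, using the RECOMMENDED_* defaults from const.py above (the integration builds more arguments than shown, so this is not the exact model_args dict):

# Illustrative only: approximate shape of the request arguments for a gpt-5
# model after this change; values are the RECOMMENDED_* defaults from const.py.
example_model_args = {
    "model": "gpt-5",
    "max_output_tokens": 3000,       # RECOMMENDED_MAX_TOKENS
    "top_p": 1.0,                    # RECOMMENDED_TOP_P
    "temperature": 1.0,              # RECOMMENDED_TEMPERATURE
    # reasoning-capable models ("o*" and "gpt-5*"):
    "reasoning": {"effort": "low"},  # RECOMMENDED_REASONING_EFFORT
    "include": ["reasoning.encrypted_content"],
    # gpt-5 models only:
    "text": {"verbosity": "medium"},  # RECOMMENDED_VERBOSITY
}
# These kwargs would be passed to the OpenAI Responses API,
# e.g. client.responses.create(**example_model_args, input=...).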

homeassistant/components/openai_conversation/strings.json

@@ -121,6 +121,7 @@
"selector": { "selector": {
"reasoning_effort": { "reasoning_effort": {
"options": { "options": {
"minimal": "Minimal",
"low": "[%key:common::state::low%]", "low": "[%key:common::state::low%]",
"medium": "[%key:common::state::medium%]", "medium": "[%key:common::state::medium%]",
"high": "[%key:common::state::high%]" "high": "[%key:common::state::high%]"
@@ -132,6 +133,13 @@
"medium": "[%key:common::state::medium%]", "medium": "[%key:common::state::medium%]",
"high": "[%key:common::state::high%]" "high": "[%key:common::state::high%]"
} }
},
"verbosity": {
"options": {
"low": "[%key:common::state::low%]",
"medium": "[%key:common::state::medium%]",
"high": "[%key:common::state::high%]"
}
} }
}, },
"services": { "services": {

tests/components/openai_conversation/conftest.py

@@ -94,7 +94,7 @@ def mock_config_entry_with_reasoning_model(
     hass.config_entries.async_update_subentry(
         mock_config_entry,
         next(iter(mock_config_entry.subentries.values())),
-        data={CONF_LLM_HASS_API: llm.LLM_API_ASSIST, CONF_CHAT_MODEL: "o4-mini"},
+        data={CONF_LLM_HASS_API: llm.LLM_API_ASSIST, CONF_CHAT_MODEL: "gpt-5-mini"},
     )
     return mock_config_entry

tests/components/openai_conversation/test_config_flow.py

@@ -20,6 +20,7 @@ from homeassistant.components.openai_conversation.const import (
     CONF_RECOMMENDED,
     CONF_TEMPERATURE,
     CONF_TOP_P,
+    CONF_VERBOSITY,
     CONF_WEB_SEARCH,
     CONF_WEB_SEARCH_CITY,
     CONF_WEB_SEARCH_CONTEXT_SIZE,
@@ -302,7 +303,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
         (
             {
                 CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pirate",
+                CONF_PROMPT: "Speak like a pro",
             },
             {
                 CONF_TEMPERATURE: 1.0,
@@ -317,7 +318,7 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
             ),
             {
                 CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pirate",
+                CONF_PROMPT: "Speak like a pro",
                 CONF_TEMPERATURE: 1.0,
                 CONF_CHAT_MODEL: "o1-pro",
                 CONF_TOP_P: RECOMMENDED_TOP_P,
@@ -414,35 +415,51 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
         (  # Case 2: reasoning model
             {
                 CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pro",
+                CONF_PROMPT: "Speak like a pirate",
                 CONF_TEMPERATURE: 0.8,
-                CONF_CHAT_MODEL: "o1-pro",
+                CONF_CHAT_MODEL: "gpt-5",
                 CONF_TOP_P: 0.9,
                 CONF_MAX_TOKENS: 1000,
-                CONF_REASONING_EFFORT: "high",
+                CONF_REASONING_EFFORT: "low",
+                CONF_VERBOSITY: "high",
+                CONF_CODE_INTERPRETER: False,
+                CONF_WEB_SEARCH: False,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: False,
             },
             (
                 {
                     CONF_RECOMMENDED: False,
-                    CONF_PROMPT: "Speak like a pro",
+                    CONF_PROMPT: "Speak like a pirate",
                 },
                 {
                     CONF_TEMPERATURE: 0.8,
-                    CONF_CHAT_MODEL: "o1-pro",
+                    CONF_CHAT_MODEL: "gpt-5",
                     CONF_TOP_P: 0.9,
                     CONF_MAX_TOKENS: 1000,
                 },
-                {CONF_REASONING_EFFORT: "high", CONF_CODE_INTERPRETER: False},
+                {
+                    CONF_REASONING_EFFORT: "minimal",
+                    CONF_CODE_INTERPRETER: False,
+                    CONF_VERBOSITY: "high",
+                    CONF_WEB_SEARCH: False,
+                    CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                    CONF_WEB_SEARCH_USER_LOCATION: False,
+                },
             ),
             {
                 CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pro",
+                CONF_PROMPT: "Speak like a pirate",
                 CONF_TEMPERATURE: 0.8,
-                CONF_CHAT_MODEL: "o1-pro",
+                CONF_CHAT_MODEL: "gpt-5",
                 CONF_TOP_P: 0.9,
                 CONF_MAX_TOKENS: 1000,
-                CONF_REASONING_EFFORT: "high",
+                CONF_REASONING_EFFORT: "minimal",
                 CONF_CODE_INTERPRETER: False,
+                CONF_VERBOSITY: "high",
+                CONF_WEB_SEARCH: False,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: False,
             },
         ),
         # Test that old options are removed after reconfiguration
@@ -482,11 +499,13 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
                 CONF_PROMPT: "Speak like a pirate",
                 CONF_LLM_HASS_API: ["assist"],
                 CONF_TEMPERATURE: 0.8,
-                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_CHAT_MODEL: "gpt-5",
                 CONF_TOP_P: 0.9,
                 CONF_MAX_TOKENS: 1000,
                 CONF_REASONING_EFFORT: "high",
                 CONF_CODE_INTERPRETER: True,
+                CONF_VERBOSITY: "low",
+                CONF_WEB_SEARCH: False,
             },
             (
                 {
@@ -550,11 +569,12 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
                 CONF_PROMPT: "Speak like a pirate",
                 CONF_LLM_HASS_API: ["assist"],
                 CONF_TEMPERATURE: 0.8,
-                CONF_CHAT_MODEL: "o3-mini",
+                CONF_CHAT_MODEL: "o5",
                 CONF_TOP_P: 0.9,
                 CONF_MAX_TOKENS: 1000,
                 CONF_REASONING_EFFORT: "low",
                 CONF_CODE_INTERPRETER: True,
+                CONF_VERBOSITY: "medium",
             },
             (
                 {