Separate steps for openai_conversation options flow (#141533)

Author: Denis Shulyaka
Committed: 2025-06-12 00:54:01 +03:00 (via GitHub)
Parent: e46e7f5a81
Commit: 8c9acf5a4d
3 changed files with 567 additions and 251 deletions
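Summary: the single options form is split into three chained steps: an "init" step (prompt, LLM API selection, recommended toggle), an "advanced" step (chat model, max tokens, top_p, temperature) that is only reached when recommended settings are off, and a "model" step that only shows options the selected model supports (reasoning effort for o* models, web search for web-search-capable models). The sketch below condenses the chaining pattern; it uses real Home Assistant flow APIs, but the class name, step contents, and option keys are illustrative, not the integration's code (that follows in the diff):

from typing import Any

import voluptuous as vol

from homeassistant.config_entries import ConfigEntry, ConfigFlowResult, OptionsFlow


class SketchOptionsFlow(OptionsFlow):
    """Toy options flow split into chained init -> advanced steps."""

    def __init__(self, config_entry: ConfigEntry) -> None:
        """Keep a mutable copy so later steps see earlier answers."""
        self.options = config_entry.options.copy()

    async def async_step_init(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """First page: the recommended toggle decides whether to go deeper."""
        if user_input is not None:
            if user_input["recommended"]:
                # Recommended settings short-circuit the rest of the flow.
                return self.async_create_entry(title="", data=user_input)
            self.options.update(user_input)
            return await self.async_step_advanced()
        return self.async_show_form(
            step_id="init",
            data_schema=self.add_suggested_values_to_schema(
                vol.Schema({vol.Required("recommended", default=False): bool}),
                self.options,
            ),
        )

    async def async_step_advanced(
        self, user_input: dict[str, Any] | None = None
    ) -> ConfigFlowResult:
        """Second page: only reached when recommended mode is off."""
        if user_input is not None:
            self.options.update(user_input)
            # The final step persists everything gathered so far.
            return self.async_create_entry(title="", data=self.options)
        return self.async_show_form(
            step_id="advanced",
            data_schema=vol.Schema({vol.Optional("chat_model"): str}),
        )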

config_flow.py

@@ -2,10 +2,8 @@
 from __future__ import annotations

-from collections.abc import Mapping
 import json
 import logging
-from types import MappingProxyType
 from typing import Any

 import openai
@@ -77,7 +75,7 @@ STEP_USER_DATA_SCHEMA = vol.Schema(
 RECOMMENDED_OPTIONS = {
     CONF_RECOMMENDED: True,
-    CONF_LLM_HASS_API: llm.LLM_API_ASSIST,
+    CONF_LLM_HASS_API: [llm.LLM_API_ASSIST],
     CONF_PROMPT: llm.DEFAULT_INSTRUCTIONS_PROMPT,
 }
@@ -142,55 +140,193 @@ class OpenAIOptionsFlow(OptionsFlow):
     def __init__(self, config_entry: ConfigEntry) -> None:
         """Initialize options flow."""
-        self.last_rendered_recommended = config_entry.options.get(
-            CONF_RECOMMENDED, False
-        )
+        self.options = config_entry.options.copy()

     async def async_step_init(
         self, user_input: dict[str, Any] | None = None
     ) -> ConfigFlowResult:
-        """Manage the options."""
-        options: dict[str, Any] | MappingProxyType[str, Any] = self.config_entry.options
-        errors: dict[str, str] = {}
+        """Manage initial options."""
+        options = self.options
+
+        hass_apis: list[SelectOptionDict] = [
+            SelectOptionDict(
+                label=api.name,
+                value=api.id,
+            )
+            for api in llm.async_get_apis(self.hass)
+        ]
+        if (suggested_llm_apis := options.get(CONF_LLM_HASS_API)) and isinstance(
+            suggested_llm_apis, str
+        ):
+            options[CONF_LLM_HASS_API] = [suggested_llm_apis]
+
+        step_schema: VolDictType = {
+            vol.Optional(
+                CONF_PROMPT,
+                description={"suggested_value": llm.DEFAULT_INSTRUCTIONS_PROMPT},
+            ): TemplateSelector(),
+            vol.Optional(CONF_LLM_HASS_API): SelectSelector(
+                SelectSelectorConfig(options=hass_apis, multiple=True)
+            ),
+            vol.Required(
+                CONF_RECOMMENDED, default=options.get(CONF_RECOMMENDED, False)
+            ): bool,
+        }

         if user_input is not None:
-            if user_input[CONF_RECOMMENDED] == self.last_rendered_recommended:
-                if not user_input.get(CONF_LLM_HASS_API):
-                    user_input.pop(CONF_LLM_HASS_API, None)
-
-                if user_input.get(CONF_CHAT_MODEL) in UNSUPPORTED_MODELS:
-                    errors[CONF_CHAT_MODEL] = "model_not_supported"
-
-                if user_input.get(CONF_WEB_SEARCH):
-                    if (
-                        user_input.get(CONF_CHAT_MODEL, RECOMMENDED_CHAT_MODEL)
-                        not in WEB_SEARCH_MODELS
-                    ):
-                        errors[CONF_WEB_SEARCH] = "web_search_not_supported"
-                    elif user_input.get(CONF_WEB_SEARCH_USER_LOCATION):
-                        user_input.update(await self.get_location_data())
-
-                if not errors:
-                    return self.async_create_entry(title="", data=user_input)
-            else:
-                # Re-render the options again, now with the recommended options shown/hidden
-                self.last_rendered_recommended = user_input[CONF_RECOMMENDED]
-
-                options = {
-                    CONF_RECOMMENDED: user_input[CONF_RECOMMENDED],
-                    CONF_PROMPT: user_input.get(
-                        CONF_PROMPT, llm.DEFAULT_INSTRUCTIONS_PROMPT
-                    ),
-                    CONF_LLM_HASS_API: user_input.get(CONF_LLM_HASS_API),
-                }
-
-        schema = openai_config_option_schema(self.hass, options)
+            if not user_input.get(CONF_LLM_HASS_API):
+                user_input.pop(CONF_LLM_HASS_API, None)
+
+            if user_input[CONF_RECOMMENDED]:
+                return self.async_create_entry(title="", data=user_input)
+
+            options.update(user_input)
+            if CONF_LLM_HASS_API in options and CONF_LLM_HASS_API not in user_input:
+                options.pop(CONF_LLM_HASS_API)
+            return await self.async_step_advanced()
+
         return self.async_show_form(
             step_id="init",
-            data_schema=vol.Schema(schema),
-            errors=errors,
+            data_schema=self.add_suggested_values_to_schema(
+                vol.Schema(step_schema), options
+            ),
         )

-    async def get_location_data(self) -> dict[str, str]:
+    async def async_step_advanced(
+        self, user_input: dict[str, Any] | None = None
+    ) -> ConfigFlowResult:
+        """Manage advanced options."""
+        options = self.options
+        errors: dict[str, str] = {}
+
+        step_schema: VolDictType = {
+            vol.Optional(
+                CONF_CHAT_MODEL,
+                default=RECOMMENDED_CHAT_MODEL,
+            ): str,
+            vol.Optional(
+                CONF_MAX_TOKENS,
+                default=RECOMMENDED_MAX_TOKENS,
+            ): int,
+            vol.Optional(
+                CONF_TOP_P,
+                default=RECOMMENDED_TOP_P,
+            ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
+            vol.Optional(
+                CONF_TEMPERATURE,
+                default=RECOMMENDED_TEMPERATURE,
+            ): NumberSelector(NumberSelectorConfig(min=0, max=2, step=0.05)),
+        }
+
+        if user_input is not None:
+            options.update(user_input)
+            if user_input.get(CONF_CHAT_MODEL) in UNSUPPORTED_MODELS:
+                errors[CONF_CHAT_MODEL] = "model_not_supported"
+            if not errors:
+                return await self.async_step_model()
+
+        return self.async_show_form(
+            step_id="advanced",
+            data_schema=self.add_suggested_values_to_schema(
+                vol.Schema(step_schema), options
+            ),
+            errors=errors,
+        )
+
+    async def async_step_model(
+        self, user_input: dict[str, Any] | None = None
+    ) -> ConfigFlowResult:
+        """Manage model-specific options."""
+        options = self.options
+        errors: dict[str, str] = {}
+
+        step_schema: VolDictType = {}
+
+        model = options[CONF_CHAT_MODEL]
+        if model.startswith("o"):
+            step_schema.update(
+                {
+                    vol.Optional(
+                        CONF_REASONING_EFFORT,
+                        default=RECOMMENDED_REASONING_EFFORT,
+                    ): SelectSelector(
+                        SelectSelectorConfig(
+                            options=["low", "medium", "high"],
+                            translation_key=CONF_REASONING_EFFORT,
+                            mode=SelectSelectorMode.DROPDOWN,
+                        )
+                    ),
+                }
+            )
+        elif CONF_REASONING_EFFORT in options:
+            options.pop(CONF_REASONING_EFFORT)
+
+        if model.startswith(tuple(WEB_SEARCH_MODELS)):
+            step_schema.update(
+                {
+                    vol.Optional(
+                        CONF_WEB_SEARCH,
+                        default=RECOMMENDED_WEB_SEARCH,
+                    ): bool,
+                    vol.Optional(
+                        CONF_WEB_SEARCH_CONTEXT_SIZE,
+                        default=RECOMMENDED_WEB_SEARCH_CONTEXT_SIZE,
+                    ): SelectSelector(
+                        SelectSelectorConfig(
+                            options=["low", "medium", "high"],
+                            translation_key=CONF_WEB_SEARCH_CONTEXT_SIZE,
+                            mode=SelectSelectorMode.DROPDOWN,
+                        )
+                    ),
+                    vol.Optional(
+                        CONF_WEB_SEARCH_USER_LOCATION,
+                        default=RECOMMENDED_WEB_SEARCH_USER_LOCATION,
+                    ): bool,
+                }
+            )
+        elif CONF_WEB_SEARCH in options:
+            options = {
+                k: v
+                for k, v in options.items()
+                if k
+                not in (
+                    CONF_WEB_SEARCH,
+                    CONF_WEB_SEARCH_CONTEXT_SIZE,
+                    CONF_WEB_SEARCH_USER_LOCATION,
+                    CONF_WEB_SEARCH_CITY,
+                    CONF_WEB_SEARCH_REGION,
+                    CONF_WEB_SEARCH_COUNTRY,
+                    CONF_WEB_SEARCH_TIMEZONE,
+                )
+            }
+
+        if not step_schema:
+            return self.async_create_entry(title="", data=options)
+
+        if user_input is not None:
+            if user_input.get(CONF_WEB_SEARCH):
+                if user_input.get(CONF_WEB_SEARCH_USER_LOCATION):
+                    user_input.update(await self._get_location_data())
+            else:
+                options.pop(CONF_WEB_SEARCH_CITY, None)
+                options.pop(CONF_WEB_SEARCH_REGION, None)
+                options.pop(CONF_WEB_SEARCH_COUNTRY, None)
+                options.pop(CONF_WEB_SEARCH_TIMEZONE, None)

+            options.update(user_input)
+            return self.async_create_entry(title="", data=options)
+
+        return self.async_show_form(
+            step_id="model",
+            data_schema=self.add_suggested_values_to_schema(
+                vol.Schema(step_schema), options
+            ),
+            errors=errors,
+        )
+
+    async def _get_location_data(self) -> dict[str, str]:
         """Get approximate location data of the user."""
         location_data: dict[str, str] = {}
         zone_home = self.hass.states.get(ENTITY_ID_HOME)
@@ -242,103 +378,3 @@ class OpenAIOptionsFlow(OptionsFlow):
         _LOGGER.debug("Location data: %s", location_data)

         return location_data
-
-
-def openai_config_option_schema(
-    hass: HomeAssistant,
-    options: Mapping[str, Any],
-) -> VolDictType:
-    """Return a schema for OpenAI completion options."""
-    hass_apis: list[SelectOptionDict] = [
-        SelectOptionDict(
-            label=api.name,
-            value=api.id,
-        )
-        for api in llm.async_get_apis(hass)
-    ]
-    if (suggested_llm_apis := options.get(CONF_LLM_HASS_API)) and isinstance(
-        suggested_llm_apis, str
-    ):
-        suggested_llm_apis = [suggested_llm_apis]
-    schema: VolDictType = {
-        vol.Optional(
-            CONF_PROMPT,
-            description={
-                "suggested_value": options.get(
-                    CONF_PROMPT, llm.DEFAULT_INSTRUCTIONS_PROMPT
-                )
-            },
-        ): TemplateSelector(),
-        vol.Optional(
-            CONF_LLM_HASS_API,
-            description={"suggested_value": suggested_llm_apis},
-        ): SelectSelector(SelectSelectorConfig(options=hass_apis, multiple=True)),
-        vol.Required(
-            CONF_RECOMMENDED, default=options.get(CONF_RECOMMENDED, False)
-        ): bool,
-    }
-
-    if options.get(CONF_RECOMMENDED):
-        return schema
-
-    schema.update(
-        {
-            vol.Optional(
-                CONF_CHAT_MODEL,
-                description={"suggested_value": options.get(CONF_CHAT_MODEL)},
-                default=RECOMMENDED_CHAT_MODEL,
-            ): str,
-            vol.Optional(
-                CONF_MAX_TOKENS,
-                description={"suggested_value": options.get(CONF_MAX_TOKENS)},
-                default=RECOMMENDED_MAX_TOKENS,
-            ): int,
-            vol.Optional(
-                CONF_TOP_P,
-                description={"suggested_value": options.get(CONF_TOP_P)},
-                default=RECOMMENDED_TOP_P,
-            ): NumberSelector(NumberSelectorConfig(min=0, max=1, step=0.05)),
-            vol.Optional(
-                CONF_TEMPERATURE,
-                description={"suggested_value": options.get(CONF_TEMPERATURE)},
-                default=RECOMMENDED_TEMPERATURE,
-            ): NumberSelector(NumberSelectorConfig(min=0, max=2, step=0.05)),
-            vol.Optional(
-                CONF_REASONING_EFFORT,
-                description={"suggested_value": options.get(CONF_REASONING_EFFORT)},
-                default=RECOMMENDED_REASONING_EFFORT,
-            ): SelectSelector(
-                SelectSelectorConfig(
-                    options=["low", "medium", "high"],
-                    translation_key=CONF_REASONING_EFFORT,
-                    mode=SelectSelectorMode.DROPDOWN,
-                )
-            ),
-            vol.Optional(
-                CONF_WEB_SEARCH,
-                description={"suggested_value": options.get(CONF_WEB_SEARCH)},
-                default=RECOMMENDED_WEB_SEARCH,
-            ): bool,
-            vol.Optional(
-                CONF_WEB_SEARCH_CONTEXT_SIZE,
-                description={
-                    "suggested_value": options.get(CONF_WEB_SEARCH_CONTEXT_SIZE)
-                },
-                default=RECOMMENDED_WEB_SEARCH_CONTEXT_SIZE,
-            ): SelectSelector(
-                SelectSelectorConfig(
-                    options=["low", "medium", "high"],
-                    translation_key=CONF_WEB_SEARCH_CONTEXT_SIZE,
-                    mode=SelectSelectorMode.DROPDOWN,
-                )
-            ),
-            vol.Optional(
-                CONF_WEB_SEARCH_USER_LOCATION,
-                description={
-                    "suggested_value": options.get(CONF_WEB_SEARCH_USER_LOCATION)
-                },
-                default=RECOMMENDED_WEB_SEARCH_USER_LOCATION,
-            ): bool,
-        }
-    )
-    return schema
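A note on the deleted openai_config_option_schema helper: its per-field description={"suggested_value": ...} plumbing is replaced by the flow handler's built-in add_suggested_values_to_schema, which injects suggestions from the stored options into a bare schema. A minimal sketch of the equivalence, with an illustrative field name rather than the integration's full schema:

import voluptuous as vol

options = {"chat_model": "gpt-4o"}

# Before: every field wires its own suggested value by hand.
manual = vol.Schema(
    {
        vol.Optional(
            "chat_model",
            description={"suggested_value": options.get("chat_model")},
        ): str,
    }
)

# After: declare the bare schema once...
bare = vol.Schema({vol.Optional("chat_model"): str})
# ...and, inside a flow handler, let the helper inject the suggestions:
#     schema = self.add_suggested_values_to_schema(bare, options)
# which yields a schema equivalent to `manual` above.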

strings.json

@@ -18,20 +18,32 @@
"init": { "init": {
"data": { "data": {
"prompt": "Instructions", "prompt": "Instructions",
"llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]",
"recommended": "Recommended model settings"
},
"data_description": {
"prompt": "Instruct how the LLM should respond. This can be a template."
}
},
"advanced": {
"title": "Advanced settings",
"data": {
"chat_model": "[%key:common::generic::model%]", "chat_model": "[%key:common::generic::model%]",
"max_tokens": "Maximum tokens to return in response", "max_tokens": "Maximum tokens to return in response",
"temperature": "Temperature", "temperature": "Temperature",
"top_p": "Top P", "top_p": "Top P"
"llm_hass_api": "[%key:common::config_flow::data::llm_hass_api%]", }
"recommended": "Recommended model settings", },
"model": {
"title": "Model-specific options",
"data": {
"reasoning_effort": "Reasoning effort", "reasoning_effort": "Reasoning effort",
"web_search": "Enable web search", "web_search": "Enable web search",
"search_context_size": "Search context size", "search_context_size": "Search context size",
"user_location": "Include home location" "user_location": "Include home location"
}, },
"data_description": { "data_description": {
"prompt": "Instruct how the LLM should respond. This can be a template.", "reasoning_effort": "How many reasoning tokens the model should generate before creating a response to the prompt",
"reasoning_effort": "How many reasoning tokens the model should generate before creating a response to the prompt (for certain reasoning models)",
"web_search": "Allow the model to search the web for the latest information before generating a response", "web_search": "Allow the model to search the web for the latest information before generating a response",
"search_context_size": "High level guidance for the amount of context window space to use for the search", "search_context_size": "High level guidance for the amount of context window space to use for the search",
"user_location": "Refine search results based on geography" "user_location": "Refine search results based on geography"
@@ -39,8 +51,7 @@
       }
     },
     "error": {
-      "model_not_supported": "This model is not supported, please select a different model",
-      "web_search_not_supported": "Web search is not supported by this model"
+      "model_not_supported": "This model is not supported, please select a different model"
     }
   },
   "selector": {

test_config_flow.py

@@ -27,7 +27,6 @@ from homeassistant.components.openai_conversation.const import (
     DOMAIN,
     RECOMMENDED_CHAT_MODEL,
     RECOMMENDED_MAX_TOKENS,
-    RECOMMENDED_REASONING_EFFORT,
     RECOMMENDED_TOP_P,
 )
 from homeassistant.const import CONF_LLM_HASS_API
@@ -77,10 +76,10 @@ async def test_form(hass: HomeAssistant) -> None:
     assert len(mock_setup_entry.mock_calls) == 1


-async def test_options(
+async def test_options_recommended(
     hass: HomeAssistant, mock_config_entry, mock_init_component
 ) -> None:
-    """Test the options form."""
+    """Test the options flow with recommended settings."""
     options_flow = await hass.config_entries.options.async_init(
         mock_config_entry.entry_id
     )
@@ -88,14 +87,12 @@ async def test_options(
         options_flow["flow_id"],
         {
             "prompt": "Speak like a pirate",
-            "max_tokens": 200,
+            "recommended": True,
         },
     )
     await hass.async_block_till_done()
     assert options["type"] is FlowResultType.CREATE_ENTRY
     assert options["data"]["prompt"] == "Speak like a pirate"
-    assert options["data"]["max_tokens"] == 200
-    assert options["data"][CONF_CHAT_MODEL] == RECOMMENDED_CHAT_MODEL


 async def test_options_unsupported_model(
@@ -105,18 +102,32 @@ async def test_options_unsupported_model(
     options_flow = await hass.config_entries.options.async_init(
         mock_config_entry.entry_id
     )
-    result = await hass.config_entries.options.async_configure(
+    assert options_flow["type"] == FlowResultType.FORM
+    assert options_flow["step_id"] == "init"
+
+    # Configure initial step
+    options_flow = await hass.config_entries.options.async_configure(
         options_flow["flow_id"],
         {
             CONF_RECOMMENDED: False,
             CONF_PROMPT: "Speak like a pirate",
-            CONF_CHAT_MODEL: "o1-mini",
             CONF_LLM_HASS_API: ["assist"],
         },
     )
     await hass.async_block_till_done()
-    assert result["type"] is FlowResultType.FORM
-    assert result["errors"] == {"chat_model": "model_not_supported"}
+    assert options_flow["type"] == FlowResultType.FORM
+    assert options_flow["step_id"] == "advanced"
+
+    # Configure advanced step
+    options_flow = await hass.config_entries.options.async_configure(
+        options_flow["flow_id"],
+        {
+            CONF_CHAT_MODEL: "o1-mini",
+        },
+    )
+    await hass.async_block_till_done()
+    assert options_flow["type"] is FlowResultType.FORM
+    assert options_flow["errors"] == {"chat_model": "model_not_supported"}


 @pytest.mark.parametrize(
@@ -165,70 +176,322 @@ async def test_form_invalid_auth(hass: HomeAssistant, side_effect, error) -> Non
 @pytest.mark.parametrize(
     ("current_options", "new_options", "expected_options"),
     [
-        (
-            {
-                CONF_RECOMMENDED: True,
-                CONF_PROMPT: "bla",
-            },
-            {
-                CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pirate",
-                CONF_TEMPERATURE: 0.3,
-            },
-            {
-                CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pirate",
-                CONF_TEMPERATURE: 0.3,
-                CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
-                CONF_TOP_P: RECOMMENDED_TOP_P,
-                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
-                CONF_REASONING_EFFORT: RECOMMENDED_REASONING_EFFORT,
-                CONF_WEB_SEARCH: False,
-                CONF_WEB_SEARCH_CONTEXT_SIZE: "medium",
-                CONF_WEB_SEARCH_USER_LOCATION: False,
-            },
-        ),
-        (
-            {
-                CONF_RECOMMENDED: False,
-                CONF_PROMPT: "Speak like a pirate",
-                CONF_TEMPERATURE: 0.3,
-                CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
-                CONF_TOP_P: RECOMMENDED_TOP_P,
-                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
-                CONF_REASONING_EFFORT: RECOMMENDED_REASONING_EFFORT,
-                CONF_WEB_SEARCH: False,
-                CONF_WEB_SEARCH_CONTEXT_SIZE: "medium",
-                CONF_WEB_SEARCH_USER_LOCATION: False,
-            },
-            {
-                CONF_RECOMMENDED: True,
-                CONF_LLM_HASS_API: ["assist"],
-                CONF_PROMPT: "",
-            },
-            {
-                CONF_RECOMMENDED: True,
-                CONF_LLM_HASS_API: ["assist"],
-                CONF_PROMPT: "",
-            },
-        ),
-        (
+        (  # Test converting single llm api format to list
             {
                 CONF_RECOMMENDED: True,
                 CONF_LLM_HASS_API: "assist",
                 CONF_PROMPT: "",
             },
+            (
+                {
+                    CONF_RECOMMENDED: True,
+                    CONF_LLM_HASS_API: ["assist"],
+                    CONF_PROMPT: "",
+                },
+            ),
             {
                 CONF_RECOMMENDED: True,
                 CONF_LLM_HASS_API: ["assist"],
                 CONF_PROMPT: "",
             },
+        ),
+        (  # options with no model-specific settings
+            {},
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+                {
+                    CONF_TEMPERATURE: 1.0,
+                    CONF_CHAT_MODEL: "gpt-4.5-preview",
+                    CONF_TOP_P: RECOMMENDED_TOP_P,
+                    CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 1.0,
+                CONF_CHAT_MODEL: "gpt-4.5-preview",
+                CONF_TOP_P: RECOMMENDED_TOP_P,
+                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+            },
+        ),
+        (  # options for reasoning models
+            {},
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+                {
+                    CONF_TEMPERATURE: 1.0,
+                    CONF_CHAT_MODEL: "o1-pro",
+                    CONF_TOP_P: RECOMMENDED_TOP_P,
+                    CONF_MAX_TOKENS: 10000,
+                },
+                {
+                    CONF_REASONING_EFFORT: "high",
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 1.0,
+                CONF_CHAT_MODEL: "o1-pro",
+                CONF_TOP_P: RECOMMENDED_TOP_P,
+                CONF_MAX_TOKENS: 10000,
+                CONF_REASONING_EFFORT: "high",
+            },
+        ),
+        (  # options for web search without user location
+            {
+                CONF_RECOMMENDED: True,
+                CONF_PROMPT: "bla",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+                {
+                    CONF_TEMPERATURE: 0.3,
+                    CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
+                    CONF_TOP_P: RECOMMENDED_TOP_P,
+                    CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+                },
+                {
+                    CONF_WEB_SEARCH: True,
+                    CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                    CONF_WEB_SEARCH_USER_LOCATION: False,
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.3,
+                CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
+                CONF_TOP_P: RECOMMENDED_TOP_P,
+                CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: False,
+            },
+        ),
+        # Test that current options are showed as suggested values
+        (  # Case 1: web search
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like super Mario",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: True,
+                CONF_WEB_SEARCH_CITY: "San Francisco",
+                CONF_WEB_SEARCH_REGION: "California",
+                CONF_WEB_SEARCH_COUNTRY: "US",
+                CONF_WEB_SEARCH_TIMEZONE: "America/Los_Angeles",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like super Mario",
+                },
+                {
+                    CONF_TEMPERATURE: 0.8,
+                    CONF_CHAT_MODEL: "gpt-4o",
+                    CONF_TOP_P: 0.9,
+                    CONF_MAX_TOKENS: 1000,
+                },
+                {
+                    CONF_WEB_SEARCH: True,
+                    CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                    CONF_WEB_SEARCH_USER_LOCATION: False,
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like super Mario",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: False,
+            },
+        ),
+        (  # Case 2: reasoning model
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pro",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "o1-pro",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_REASONING_EFFORT: "high",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pro",
+                },
+                {
+                    CONF_TEMPERATURE: 0.8,
+                    CONF_CHAT_MODEL: "o1-pro",
+                    CONF_TOP_P: 0.9,
+                    CONF_MAX_TOKENS: 1000,
+                },
+                {CONF_REASONING_EFFORT: "high"},
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pro",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "o1-pro",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_REASONING_EFFORT: "high",
+            },
+        ),
+        # Test that old options are removed after reconfiguration
+        (  # Case 1: web search to recommended
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: True,
+                CONF_WEB_SEARCH_CITY: "San Francisco",
+                CONF_WEB_SEARCH_REGION: "California",
+                CONF_WEB_SEARCH_COUNTRY: "US",
+                CONF_WEB_SEARCH_TIMEZONE: "America/Los_Angeles",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: True,
+                    CONF_LLM_HASS_API: ["assist"],
+                    CONF_PROMPT: "",
+                },
+            ),
             {
                 CONF_RECOMMENDED: True,
                 CONF_LLM_HASS_API: ["assist"],
                 CONF_PROMPT: "",
             },
         ),
+        (  # Case 2: reasoning to recommended
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_LLM_HASS_API: ["assist"],
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_REASONING_EFFORT: "high",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: True,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+            ),
+            {
+                CONF_RECOMMENDED: True,
+                CONF_PROMPT: "Speak like a pirate",
+            },
+        ),
+        (  # Case 3: web search to reasoning
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_LLM_HASS_API: ["assist"],
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "low",
+                CONF_WEB_SEARCH_USER_LOCATION: True,
+                CONF_WEB_SEARCH_CITY: "San Francisco",
+                CONF_WEB_SEARCH_REGION: "California",
+                CONF_WEB_SEARCH_COUNTRY: "US",
+                CONF_WEB_SEARCH_TIMEZONE: "America/Los_Angeles",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+                {
+                    CONF_TEMPERATURE: 0.8,
+                    CONF_CHAT_MODEL: "o3-mini",
+                    CONF_TOP_P: 0.9,
+                    CONF_MAX_TOKENS: 1000,
+                },
+                {
+                    CONF_REASONING_EFFORT: "low",
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "o3-mini",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_REASONING_EFFORT: "low",
+            },
+        ),
+        (  # Case 4: reasoning to web search
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_LLM_HASS_API: ["assist"],
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "o3-mini",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_REASONING_EFFORT: "low",
+            },
+            (
+                {
+                    CONF_RECOMMENDED: False,
+                    CONF_PROMPT: "Speak like a pirate",
+                },
+                {
+                    CONF_TEMPERATURE: 0.8,
+                    CONF_CHAT_MODEL: "gpt-4o",
+                    CONF_TOP_P: 0.9,
+                    CONF_MAX_TOKENS: 1000,
+                },
+                {
+                    CONF_WEB_SEARCH: True,
+                    CONF_WEB_SEARCH_CONTEXT_SIZE: "high",
+                    CONF_WEB_SEARCH_USER_LOCATION: False,
+                },
+            ),
+            {
+                CONF_RECOMMENDED: False,
+                CONF_PROMPT: "Speak like a pirate",
+                CONF_TEMPERATURE: 0.8,
+                CONF_CHAT_MODEL: "gpt-4o",
+                CONF_TOP_P: 0.9,
+                CONF_MAX_TOKENS: 1000,
+                CONF_WEB_SEARCH: True,
+                CONF_WEB_SEARCH_CONTEXT_SIZE: "high",
+                CONF_WEB_SEARCH_USER_LOCATION: False,
+            },
+        ),
     ],
 )


 async def test_options_switching(
@@ -241,22 +504,31 @@ async def test_options_switching(
 ) -> None:
     """Test the options form."""
     hass.config_entries.async_update_entry(mock_config_entry, options=current_options)
-    options_flow = await hass.config_entries.options.async_init(
-        mock_config_entry.entry_id
-    )
-    if current_options.get(CONF_RECOMMENDED) != new_options.get(CONF_RECOMMENDED):
-        options_flow = await hass.config_entries.options.async_configure(
-            options_flow["flow_id"],
-            {
-                **current_options,
-                CONF_RECOMMENDED: new_options[CONF_RECOMMENDED],
-            },
-        )
-    options = await hass.config_entries.options.async_configure(
-        options_flow["flow_id"],
-        new_options,
-    )
-    await hass.async_block_till_done()
+    options = await hass.config_entries.options.async_init(mock_config_entry.entry_id)
+    assert options["step_id"] == "init"
+
+    for step_options in new_options:
+        assert options["type"] == FlowResultType.FORM
+
+        # Test that current options are showed as suggested values:
+        for key in options["data_schema"].schema:
+            if (
+                isinstance(key.description, dict)
+                and "suggested_value" in key.description
+                and key in current_options
+            ):
+                current_option = current_options[key]
+                if key == CONF_LLM_HASS_API and isinstance(current_option, str):
+                    current_option = [current_option]
+                assert key.description["suggested_value"] == current_option
+
+        # Configure current step
+        options = await hass.config_entries.options.async_configure(
+            options["flow_id"],
+            step_options,
+        )
+        await hass.async_block_till_done()

     assert options["type"] is FlowResultType.CREATE_ENTRY
     assert options["data"] == expected_options
@@ -265,9 +537,35 @@ async def test_options_web_search_user_location(
     hass: HomeAssistant, mock_config_entry, mock_init_component
 ) -> None:
     """Test fetching user location."""
-    options_flow = await hass.config_entries.options.async_init(
-        mock_config_entry.entry_id
+    options = await hass.config_entries.options.async_init(mock_config_entry.entry_id)
+    assert options["type"] == FlowResultType.FORM
+    assert options["step_id"] == "init"
+
+    # Configure initial step
+    options = await hass.config_entries.options.async_configure(
+        options["flow_id"],
+        {
+            CONF_RECOMMENDED: False,
+            CONF_PROMPT: "Speak like a pirate",
+        },
     )
+    assert options["type"] == FlowResultType.FORM
+    assert options["step_id"] == "advanced"
+
+    # Configure advanced step
+    options = await hass.config_entries.options.async_configure(
+        options["flow_id"],
+        {
+            CONF_TEMPERATURE: 1.0,
+            CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
+            CONF_TOP_P: RECOMMENDED_TOP_P,
+            CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
+        },
+    )
+    await hass.async_block_till_done()
+    assert options["type"] == FlowResultType.FORM
+    assert options["step_id"] == "model"
+
     hass.config.country = "US"
     hass.config.time_zone = "America/Los_Angeles"
     hass.states.async_set(
@@ -302,16 +600,10 @@ async def test_options_web_search_user_location(
         ],
     )

+    # Configure model step
     options = await hass.config_entries.options.async_configure(
-        options_flow["flow_id"],
+        options["flow_id"],
         {
-            CONF_RECOMMENDED: False,
-            CONF_PROMPT: "Speak like a pirate",
-            CONF_TEMPERATURE: 1.0,
-            CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
-            CONF_TOP_P: RECOMMENDED_TOP_P,
-            CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
-            CONF_REASONING_EFFORT: RECOMMENDED_REASONING_EFFORT,
             CONF_WEB_SEARCH: True,
             CONF_WEB_SEARCH_CONTEXT_SIZE: "medium",
             CONF_WEB_SEARCH_USER_LOCATION: True,
@@ -330,7 +622,6 @@ async def test_options_web_search_user_location(
         CONF_CHAT_MODEL: RECOMMENDED_CHAT_MODEL,
         CONF_TOP_P: RECOMMENDED_TOP_P,
         CONF_MAX_TOKENS: RECOMMENDED_MAX_TOKENS,
-        CONF_REASONING_EFFORT: RECOMMENDED_REASONING_EFFORT,
         CONF_WEB_SEARCH: True,
         CONF_WEB_SEARCH_CONTEXT_SIZE: "medium",
         CONF_WEB_SEARCH_USER_LOCATION: True,
@@ -339,25 +630,3 @@ async def test_options_web_search_user_location(
         CONF_WEB_SEARCH_COUNTRY: "US",
         CONF_WEB_SEARCH_TIMEZONE: "America/Los_Angeles",
     }
-
-
-async def test_options_web_search_unsupported_model(
-    hass: HomeAssistant, mock_config_entry, mock_init_component
-) -> None:
-    """Test the options form giving error about web search not being available."""
-    options_flow = await hass.config_entries.options.async_init(
-        mock_config_entry.entry_id
-    )
-    result = await hass.config_entries.options.async_configure(
-        options_flow["flow_id"],
-        {
-            CONF_RECOMMENDED: False,
-            CONF_PROMPT: "Speak like a pirate",
-            CONF_CHAT_MODEL: "o1-pro",
-            CONF_LLM_HASS_API: ["assist"],
-            CONF_WEB_SEARCH: True,
-        },
-    )
-    await hass.async_block_till_done()
-    assert result["type"] is FlowResultType.FORM
-    assert result["errors"] == {"web_search": "web_search_not_supported"}
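Note on the removed test_options_web_search_unsupported_model: after this change the model step only offers web-search fields when the selected model starts with one of WEB_SEARCH_MODELS, so the web_search_not_supported error can no longer be triggered from the form, and the error string is dropped from strings.json as well. A rough sketch of that gating; the model tuple here is an illustrative placeholder, not the integration's actual WEB_SEARCH_MODELS:

WEB_SEARCH_MODELS = ("gpt-4o", "gpt-4o-mini", "gpt-4.1")  # illustrative subset


def model_step_fields(model: str) -> list[str]:
    """Option keys the model step would expose for a given model."""
    fields: list[str] = []
    if model.startswith("o"):  # reasoning-style models
        fields.append("reasoning_effort")
    if model.startswith(WEB_SEARCH_MODELS):
        fields += ["web_search", "search_context_size", "user_location"]
    return fields


# Web-search options simply never appear for unsupported models,
# so there is no form submission that could trigger the old error.
assert "web_search" in model_step_fields("gpt-4o")
assert "web_search" not in model_step_fields("o1-pro")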