Compare commits


11 Commits

23 changed files with 617 additions and 81 deletions


@@ -2,8 +2,6 @@
from __future__ import annotations
- from functools import partial
import anthropic
from homeassistant.config_entries import ConfigEntry, ConfigSubentry
@@ -16,6 +14,7 @@ from homeassistant.helpers import (
entity_registry as er,
issue_registry as ir,
)
+ from homeassistant.helpers.httpx_client import get_async_client
from homeassistant.helpers.typing import ConfigType
from .const import (
@@ -42,8 +41,8 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
async def async_setup_entry(hass: HomeAssistant, entry: AnthropicConfigEntry) -> bool:
"""Set up Anthropic from a config entry."""
- client = await hass.async_add_executor_job(
- partial(anthropic.AsyncAnthropic, api_key=entry.data[CONF_API_KEY])
+ client = anthropic.AsyncAnthropic(
+ api_key=entry.data[CONF_API_KEY], http_client=get_async_client(hass)
)
try:
await client.models.list(timeout=10.0)


@@ -2,7 +2,6 @@
from __future__ import annotations
- from functools import partial
import json
import logging
import re
@@ -30,6 +29,7 @@ from homeassistant.const import (
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import llm
+ from homeassistant.helpers.httpx_client import get_async_client
from homeassistant.helpers.selector import (
NumberSelector,
NumberSelectorConfig,
@@ -89,8 +89,8 @@ async def validate_input(hass: HomeAssistant, data: dict[str, Any]) -> None:
Data has the keys from STEP_USER_DATA_SCHEMA with values provided by the user.
"""
- client = await hass.async_add_executor_job(
- partial(anthropic.AsyncAnthropic, api_key=data[CONF_API_KEY])
+ client = anthropic.AsyncAnthropic(
+ api_key=data[CONF_API_KEY], http_client=get_async_client(hass)
)
await client.models.list(timeout=10.0)
@@ -457,11 +457,9 @@ class ConversationSubentryFlowHandler(ConfigSubentryFlow):
async def _get_model_list(self) -> list[SelectOptionDict]:
"""Get list of available models."""
- client = await self.hass.async_add_executor_job(
- partial(
- anthropic.AsyncAnthropic,
- api_key=self._get_entry().data[CONF_API_KEY],
- )
+ client = anthropic.AsyncAnthropic(
+ api_key=self._get_entry().data[CONF_API_KEY],
+ http_client=get_async_client(self.hass),
)
return await get_model_list(client)
@@ -470,11 +468,9 @@ class ConversationSubentryFlowHandler(ConfigSubentryFlow):
location_data: dict[str, str] = {}
zone_home = self.hass.states.get(ENTITY_ID_HOME)
if zone_home is not None:
- client = await self.hass.async_add_executor_job(
- partial(
- anthropic.AsyncAnthropic,
- api_key=self._get_entry().data[CONF_API_KEY],
- )
+ client = anthropic.AsyncAnthropic(
+ api_key=self._get_entry().data[CONF_API_KEY],
+ http_client=get_async_client(self.hass),
)
location_schema = vol.Schema(
{

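Both Anthropic files make the same change: the AsyncAnthropic client is no longer built inside async_add_executor_job with functools.partial, but is constructed directly with Home Assistant's shared async httpx client. Letting the SDK create its own httpx client involves blocking setup work such as loading SSL certificates, which is presumably why the old code used an executor job. A minimal sketch of the new pattern, assuming a hass instance and a config entry that stores CONF_API_KEY (make_anthropic_client is a hypothetical wrapper, not code from this PR):

import anthropic
from homeassistant.helpers.httpx_client import get_async_client

def make_anthropic_client(hass, api_key: str) -> anthropic.AsyncAnthropic:
    # Reuse Home Assistant's shared httpx.AsyncClient instead of letting the SDK
    # build its own; no executor hop is needed because nothing blocks here.
    return anthropic.AsyncAnthropic(api_key=api_key, http_client=get_async_client(hass))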

@@ -50,6 +50,7 @@ from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import llm
from homeassistant.helpers.entity import Entity
+ from homeassistant.helpers.json import json_dumps
from homeassistant.util import slugify
from .client import CloudClient
@@ -93,7 +94,7 @@ def _convert_content_to_param(
{
"type": "function_call_output",
"call_id": content.tool_call_id,
"output": json.dumps(content.tool_result),
"output": json_dumps(content.tool_result),
}
)
continue
@@ -125,7 +126,7 @@ def _convert_content_to_param(
{
"type": "function_call",
"name": tool_call.tool_name,
"arguments": json.dumps(tool_call.tool_args),
"arguments": json_dumps(tool_call.tool_args),
"call_id": tool_call.id,
}
)

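The cloud file above (and the ollama, open_router, and openai_conversation files further down) swaps json.dumps for Home Assistant's json_dumps helper when serializing tool call arguments and tool results. Two effects of that helper are visible in this PR's snapshots: output is compact (no space after ':' or ','), and values such as datetime.time inside tool results are serialized rather than raising. A rough sketch, assuming the orjson-backed helper behaves as the snapshots suggest:

import datetime
import json
from homeassistant.helpers.json import json_dumps

payload = {"speech_slots": {"time": datetime.time(12, 0)}}
# json.dumps(payload) raises TypeError: Object of type time is not JSON serializable
print(json_dumps(payload))  # {"speech_slots":{"time":"12:00:00"}}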

@@ -18,12 +18,12 @@
"entity": {
"stt": {
"elevenlabs_stt": {
"name": "Speech-to-Text"
"name": "Speech-to-text"
}
},
"tts": {
"elevenlabs_tts": {
"name": "Text-to-Speech"
"name": "Text-to-speech"
}
}
},
@@ -34,14 +34,14 @@
"configure_voice": "Configure advanced voice settings",
"model": "Model",
"stt_auto_language": "Auto-detect language",
"stt_model": "Speech-to-Text Model",
"stt_model": "Speech-to-text model",
"voice": "Voice"
},
"data_description": {
"configure_voice": "Configure advanced voice settings. Find more information in the ElevenLabs documentation.",
"model": "ElevenLabs model to use. Please note that not all models support all languages equally well.",
"stt_auto_language": "Automatically detect the spoken language for speech-to-text.",
"stt_model": "Speech-to-Text model to use.",
"stt_model": "Speech-to-text model to use.",
"voice": "Voice to use for text-to-speech."
}
},


@@ -7,6 +7,6 @@
"integration_type": "service",
"iot_class": "cloud_polling",
"quality_scale": "silver",
"requirements": ["essent-dynamic-pricing==0.2.7"],
"requirements": ["essent-dynamic-pricing==0.3.1"],
"single_config_entry": true
}


@@ -7,6 +7,7 @@ import base64
import codecs
from collections.abc import AsyncGenerator, AsyncIterator, Callable
from dataclasses import dataclass, replace
+ import datetime
import mimetypes
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal, cast
@@ -181,13 +182,25 @@ def _escape_decode(value: Any) -> Any:
return value
+ def _validate_tool_results(value: Any) -> Any:
+ """Recursively convert non-json-serializable types."""
+ if isinstance(value, (datetime.time, datetime.date)):
+ return value.isoformat()
+ if isinstance(value, list):
+ return [_validate_tool_results(item) for item in value]
+ if isinstance(value, dict):
+ return {k: _validate_tool_results(v) for k, v in value.items()}
+ return value
def _create_google_tool_response_parts(
parts: list[conversation.ToolResultContent],
) -> list[Part]:
"""Create Google tool response parts."""
return [
Part.from_function_response(
- name=tool_result.tool_name, response=tool_result.tool_result
+ name=tool_result.tool_name,
+ response=_validate_tool_results(tool_result.tool_result),
)
for tool_result in parts
]

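The new _validate_tool_results helper walks lists and dicts recursively and turns datetime.date/datetime.time values into ISO strings before the tool result is passed to Part.from_function_response, presumably because the Google SDK needs a JSON-serializable response payload. Since datetime.datetime subclasses datetime.date, timestamps are covered by the same branch. A hypothetical input/output pair:

import datetime

_validate_tool_results({"speech_slots": {"time": datetime.time(16, 24)}, "data": {"failed": []}})
# -> {"speech_slots": {"time": "16:24:00"}, "data": {"failed": []}}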

@@ -94,10 +94,10 @@
"entry_not_loaded": "[%key:component::google_generative_ai_conversation::config_subentries::conversation::abort::entry_not_loaded%]",
"reconfigure_successful": "[%key:common::config_flow::abort::reconfigure_successful%]"
},
"entry_type": "Speech-to-Text",
"entry_type": "Speech-to-text",
"initiate_flow": {
"reconfigure": "Reconfigure Speech-to-Text service",
"user": "Add Speech-to-Text service"
"reconfigure": "Reconfigure speech-to-text service",
"user": "Add speech-to-text service"
},
"step": {
"set_options": {
@@ -126,10 +126,10 @@
"entry_not_loaded": "[%key:component::google_generative_ai_conversation::config_subentries::conversation::abort::entry_not_loaded%]",
"reconfigure_successful": "[%key:common::config_flow::abort::reconfigure_successful%]"
},
"entry_type": "Text-to-Speech",
"entry_type": "Text-to-speech",
"initiate_flow": {
"reconfigure": "Reconfigure Text-to-Speech service",
"user": "Add Text-to-Speech service"
"reconfigure": "Reconfigure text-to-speech service",
"user": "Add text-to-speech service"
},
"step": {
"set_options": {


@@ -16,6 +16,7 @@ from homeassistant.config_entries import ConfigSubentry
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, llm
from homeassistant.helpers.entity import Entity
+ from homeassistant.helpers.json import json_dumps
from . import OllamaConfigEntry
from .const import (
@@ -93,7 +94,7 @@ def _convert_content(
if isinstance(chat_content, conversation.ToolResultContent):
return ollama.Message(
role=MessageRole.TOOL.value,
- content=json.dumps(chat_content.tool_result),
+ content=json_dumps(chat_content.tool_result),
)
if isinstance(chat_content, conversation.AssistantContent):
return ollama.Message(


@@ -34,6 +34,7 @@ from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, llm
from homeassistant.helpers.entity import Entity
+ from homeassistant.helpers.json import json_dumps
from . import OpenRouterConfigEntry
from .const import DOMAIN, LOGGER
@@ -109,7 +110,7 @@ def _convert_content_to_chat_message(
return ChatCompletionToolMessageParam(
role="tool",
tool_call_id=content.tool_call_id,
- content=json.dumps(content.tool_result),
+ content=json_dumps(content.tool_result),
)
role: Literal["user", "assistant", "system"] = content.role
@@ -130,7 +131,7 @@ def _convert_content_to_chat_message(
type="function",
id=tool_call.id,
function=Function(
- arguments=json.dumps(tool_call.tool_args),
+ arguments=json_dumps(tool_call.tool_args),
name=tool_call.tool_name,
),
)


@@ -64,6 +64,7 @@ from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import device_registry as dr, issue_registry as ir, llm
from homeassistant.helpers.entity import Entity
+ from homeassistant.helpers.json import json_dumps
from homeassistant.util import slugify
from .const import (
@@ -183,7 +184,7 @@ def _convert_content_to_param(
FunctionCallOutput(
type="function_call_output",
call_id=content.tool_call_id,
- output=json.dumps(content.tool_result),
+ output=json_dumps(content.tool_result),
)
)
continue
@@ -217,7 +218,7 @@ def _convert_content_to_param(
ResponseFunctionToolCallParam(
type="function_call",
name=tool_call.tool_name,
- arguments=json.dumps(tool_call.tool_args),
+ arguments=json_dumps(tool_call.tool_args),
call_id=tool_call.id,
)
)


@@ -175,6 +175,7 @@ async def _async_setup_block_entry(
)
# https://github.com/home-assistant/core/pull/48076
if device_entry and entry.entry_id not in device_entry.config_entries:
LOGGER.debug("Detected first time setup for device %s", entry.title)
device_entry = None
sleep_period = entry.data.get(CONF_SLEEP_PERIOD)
@@ -288,6 +289,7 @@ async def _async_setup_rpc_entry(hass: HomeAssistant, entry: ShellyConfigEntry)
)
# https://github.com/home-assistant/core/pull/48076
if device_entry and entry.entry_id not in device_entry.config_entries:
LOGGER.debug("Detected first time setup for device %s", entry.title)
device_entry = None
sleep_period = entry.data.get(CONF_SLEEP_PERIOD)


@@ -28,7 +28,7 @@ _LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
- def _toggle_switch(device: VeSyncBaseDevice, *args) -> Awaitable[bool]:
+ def _toggle_switch(device: VeSyncBaseDevice, *args: Any) -> Awaitable[bool]:
"""Toggle power on."""
if args and args[0] is True and hasattr(device, "turn_on"):
return device.turn_on()
@@ -37,21 +37,21 @@ def _toggle_switch(device: VeSyncBaseDevice, *args) -> Awaitable[bool]:
raise HomeAssistantError("Device does not support toggling power.")
- def _toggle_display(device: VeSyncBaseDevice, *args) -> Awaitable[bool]:
+ def _toggle_display(device: VeSyncBaseDevice, *args: Any) -> Awaitable[bool]:
"""Toggle display on."""
if hasattr(device, "toggle_display"):
return device.toggle_display(*args)
raise HomeAssistantError("Device does not support toggling display.")
- def _toggle_child_lock(device: VeSyncBaseDevice, *args) -> Awaitable[bool]:
+ def _toggle_child_lock(device: VeSyncBaseDevice, *args: Any) -> Awaitable[bool]:
"""Toggle child lock on."""
if hasattr(device, "toggle_child_lock"):
return device.toggle_child_lock(*args)
raise HomeAssistantError("Device does not support toggling child lock.")
- def _toggle_auto_stop(device: VeSyncBaseDevice, *args) -> Awaitable[bool]:
+ def _toggle_auto_stop(device: VeSyncBaseDevice, *args: Any) -> Awaitable[bool]:
"""Toggle automatic stop on."""
match device:
case VeSyncHumidifier() as sw if hasattr(sw, "toggle_automatic_stop"):

requirements_all.txt generated

@@ -929,7 +929,7 @@ eq3btsmart==2.3.0
esphome-dashboard-api==1.3.0
# homeassistant.components.essent
- essent-dynamic-pricing==0.2.7
+ essent-dynamic-pricing==0.3.1
# homeassistant.components.netgear_lte
eternalegypt==0.0.18


@@ -820,7 +820,7 @@ eq3btsmart==2.3.0
esphome-dashboard-api==1.3.0
# homeassistant.components.essent
- essent-dynamic-pricing==0.2.7
+ essent-dynamic-pricing==0.3.1
# homeassistant.components.netgear_lte
eternalegypt==0.0.18


@@ -3,6 +3,7 @@
from __future__ import annotations
import base64
+ import datetime
from pathlib import Path
from unittest.mock import AsyncMock, MagicMock, patch
@@ -223,9 +224,40 @@ async def test_prepare_chat_for_generation_passes_messages_through(
) -> None:
"""Test that prepared messages are forwarded unchanged."""
chat_log = conversation.ChatLog(hass, "conversation-id")
- chat_log.async_add_assistant_content_without_tools(
- conversation.AssistantContent(agent_id="agent", content="Ready")
+ chat_log.async_add_user_content(
+ conversation.UserContent(content="What time is it?")
)
+ chat_log.async_add_assistant_content_without_tools(
+ conversation.AssistantContent(
+ agent_id="agent",
+ tool_calls=[
+ llm.ToolInput(
+ tool_name="HassGetCurrentTime",
+ tool_args={},
+ id="mock-tool-call-id",
+ external=True,
+ )
+ ],
+ )
+ )
+ chat_log.async_add_assistant_content_without_tools(
+ conversation.ToolResultContent(
+ agent_id="agent",
+ tool_call_id="mock-tool-call-id",
+ tool_name="HassGetCurrentTime",
+ tool_result={
+ "speech": {"plain": {"speech": "12:00 PM", "extra_data": None}},
+ "response_type": "action_done",
+ "speech_slots": {"time": datetime.time(12, 0)},
+ "data": {"targets": [], "success": [], "failed": []},
+ },
+ )
+ )
+ chat_log.async_add_assistant_content_without_tools(
+ conversation.AssistantContent(agent_id="agent", content="12:00 PM")
+ )
messages = _convert_content_to_param(chat_log.content)
response = await cloud_entity._prepare_chat_for_generation(chat_log, messages)


@@ -1,6 +1,57 @@
# serializer version: 1
# name: test_function_call
list([
Content(
parts=[
Part(
text='What time is it?'
),
],
role='user'
),
Content(
parts=[
Part(
function_call=FunctionCall(
args={},
name='HassGetCurrentTime'
)
),
],
role='model'
),
Content(
parts=[
Part(
function_response=FunctionResponse(
name='HassGetCurrentTime',
response={
'data': {
'failed': [],
'success': [],
'targets': []
},
'response_type': 'action_done',
'speech': {
'plain': {<... 2 items at Max depth ...>}
},
'speech_slots': {
'time': '16:24:17.813343'
}
}
)
),
],
role='user'
),
Content(
parts=[
Part(
text='4:24 PM'
),
],
role='model'
),
Content(
parts=[
Part(


@@ -1,5 +1,6 @@
"""Tests for the Google Generative AI Conversation integration conversation platform."""
+ import datetime
from unittest.mock import AsyncMock, patch
from freezegun import freeze_time
@@ -8,7 +9,11 @@ import pytest
from syrupy.assertion import SnapshotAssertion
from homeassistant.components import conversation
- from homeassistant.components.conversation import UserContent
+ from homeassistant.components.conversation import (
+ AssistantContent,
+ ToolResultContent,
+ UserContent,
+ )
from homeassistant.components.google_generative_ai_conversation.entity import (
ERROR_GETTING_RESPONSE,
_escape_decode,
@@ -17,6 +22,7 @@ from homeassistant.components.google_generative_ai_conversation.entity import (
from homeassistant.const import CONF_LLM_HASS_API
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import intent
+ from homeassistant.helpers.llm import ToolInput
from . import API_ERROR_500, CLIENT_ERROR_BAD_REQUEST
@@ -87,6 +93,41 @@ async def test_function_call(
agent_id = "conversation.google_ai_conversation"
context = Context()
# Add some pre-existing content from conversation.default_agent
mock_chat_log.async_add_user_content(UserContent(content="What time is it?"))
mock_chat_log.async_add_assistant_content_without_tools(
AssistantContent(
agent_id=agent_id,
tool_calls=[
ToolInput(
tool_name="HassGetCurrentTime",
tool_args={},
id="01KGW7TFC1VVVK7ANHVMDA4DJ6",
external=True,
)
],
)
)
mock_chat_log.async_add_assistant_content_without_tools(
ToolResultContent(
agent_id=agent_id,
tool_call_id="01KGW7TFC1VVVK7ANHVMDA4DJ6",
tool_name="HassGetCurrentTime",
tool_result={
"speech": {"plain": {"speech": "4:24 PM", "extra_data": None}},
"response_type": "action_done",
"speech_slots": {"time": datetime.time(16, 24, 17, 813343)},
"data": {"targets": [], "success": [], "failed": []},
},
)
)
mock_chat_log.async_add_assistant_content_without_tools(
AssistantContent(
agent_id=agent_id,
content="4:24 PM",
)
)
messages = [
# Function call stream
[


@@ -1,6 +1,7 @@
"""Tests for the Ollama integration."""
from collections.abc import AsyncGenerator
+ import datetime
from typing import Any
from unittest.mock import AsyncMock, Mock, patch
@@ -23,6 +24,10 @@ from homeassistant.helpers import (
)
from tests.common import MockConfigEntry
+ from tests.components.conversation import (
+ MockChatLog,
+ mock_chat_log, # noqa: F401
+ )
@pytest.fixture(autouse=True)
@@ -458,6 +463,102 @@ async def test_function_exception(
)
async def test_history_conversion(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_init_component,
mock_chat_log: MockChatLog, # noqa: F811
) -> None:
"""Test that the pre-existing chat_log history is handled properly."""
agent_id = "conversation.ollama_conversation"
# Add some pre-existing content from conversation.default_agent
mock_chat_log.async_add_user_content(
conversation.UserContent(content="What time is it?")
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id=agent_id,
tool_calls=[
llm.ToolInput(
tool_name="HassGetCurrentTime",
tool_args={},
id="01KGW7TFC1VVVK7ANHVMDA4DJ6",
external=True,
)
],
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.ToolResultContent(
agent_id=agent_id,
tool_call_id="01KGW7TFC1VVVK7ANHVMDA4DJ6",
tool_name="HassGetCurrentTime",
tool_result={
"speech": {"plain": {"speech": "4:24 PM", "extra_data": None}},
"response_type": "action_done",
"speech_slots": {"time": datetime.time(16, 24, 17, 813343)},
"data": {"targets": [], "success": [], "failed": []},
},
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id=agent_id,
content="4:24 PM",
)
)
entry = MockConfigEntry()
entry.add_to_hass(hass)
with patch(
"ollama.AsyncClient.chat",
return_value=stream_generator(
{"message": {"role": "assistant", "content": "test response"}}
),
) as mock_chat:
result = await conversation.async_converse(
hass,
"test message",
mock_chat_log.conversation_id,
Context(),
agent_id=agent_id,
)
assert mock_chat.call_count == 1
args = mock_chat.call_args.kwargs
prompt = args["messages"][0]["content"]
assert args["model"] == "test_model:latest"
assert args["messages"] == [
Message(role="system", content=prompt),
Message(role="user", content="What time is it?"),
Message(
role="assistant",
tool_calls=[
Message.ToolCall(
function=Message.ToolCall.Function(
name="HassGetCurrentTime", arguments={}
)
)
],
),
Message(
role="tool",
content='{"speech":{"plain":{"speech":"4:24 PM","extra_data":null}},"response_type":"action_done","speech_slots":{"time":"16:24:17.813343"},"data":{"targets":[],"success":[],"failed":[]}}',
),
Message(role="assistant", content="4:24 PM"),
Message(role="user", content="test message"),
]
assert result.response.response_type == intent.IntentResponseType.ACTION_DONE, (
result
)
assert result.response.speech["plain"]["speech"] == "test response"
async def test_unknown_hass_api(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,


@@ -126,6 +126,65 @@
# ---
# name: test_function_call[True]
list([
dict({
'attachments': None,
'content': 'What time is it?',
'created': HAFakeDatetime(2024, 5, 24, 12, 0, tzinfo=datetime.timezone.utc),
'role': 'user',
}),
dict({
'agent_id': 'conversation.gpt_3_5_turbo',
'content': None,
'created': HAFakeDatetime(2024, 5, 24, 12, 0, tzinfo=datetime.timezone.utc),
'native': None,
'role': 'assistant',
'thinking_content': None,
'tool_calls': list([
dict({
'external': True,
'id': 'mock_tool_call_id',
'tool_args': dict({
}),
'tool_name': 'HassGetCurrentTime',
}),
]),
}),
dict({
'agent_id': 'conversation.gpt_3_5_turbo',
'created': HAFakeDatetime(2024, 5, 24, 12, 0, tzinfo=datetime.timezone.utc),
'role': 'tool_result',
'tool_call_id': 'mock_tool_call_id',
'tool_name': 'HassGetCurrentTime',
'tool_result': dict({
'data': dict({
'failed': list([
]),
'success': list([
]),
'targets': list([
]),
}),
'response_type': 'action_done',
'speech': dict({
'plain': dict({
'extra_data': None,
'speech': '12:00 PM',
}),
}),
'speech_slots': dict({
'time': datetime.time(12, 0),
}),
}),
}),
dict({
'agent_id': 'conversation.gpt_3_5_turbo',
'content': '12:00 PM',
'created': HAFakeDatetime(2024, 5, 24, 12, 0, tzinfo=datetime.timezone.utc),
'native': None,
'role': 'assistant',
'thinking_content': None,
'tool_calls': None,
}),
dict({
'attachments': None,
'content': 'Please call the test function',
@@ -169,3 +228,68 @@
}),
])
# ---
# name: test_function_call[True].1
list([
dict({
'content': '''
You are a helpful assistant.
Only if the user wants to control a device, tell them to expose entities to their voice assistant in Home Assistant.
''',
'role': 'system',
}),
dict({
'content': 'What time is it?',
'role': 'user',
}),
dict({
'content': None,
'role': 'assistant',
'tool_calls': list([
dict({
'function': dict({
'arguments': '{}',
'name': 'HassGetCurrentTime',
}),
'id': 'mock_tool_call_id',
'type': 'function',
}),
]),
}),
dict({
'content': '{"speech":{"plain":{"speech":"12:00 PM","extra_data":null}},"response_type":"action_done","speech_slots":{"time":"12:00:00"},"data":{"targets":[],"success":[],"failed":[]}}',
'role': 'tool',
'tool_call_id': 'mock_tool_call_id',
}),
dict({
'content': '12:00 PM',
'role': 'assistant',
}),
dict({
'content': 'Please call the test function',
'role': 'user',
}),
dict({
'content': None,
'role': 'assistant',
'tool_calls': list([
dict({
'function': dict({
'arguments': '{"param1":"call1"}',
'name': 'test_tool',
}),
'id': 'call_call_1',
'type': 'function',
}),
]),
}),
dict({
'content': '"value1"',
'role': 'tool',
'tool_call_id': 'call_call_1',
}),
dict({
'content': 'I have successfully called the function',
'role': 'assistant',
}),
])
# ---


@@ -1,5 +1,6 @@
"""Tests for the OpenRouter integration."""
+ import datetime
from unittest.mock import AsyncMock, patch
from freezegun import freeze_time
@@ -18,6 +19,7 @@ from homeassistant.components import conversation
from homeassistant.const import Platform
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry as er, intent
+ from homeassistant.helpers.llm import ToolInput
from . import setup_integration
@@ -88,6 +90,43 @@ async def test_function_call(
"""Test function call from the assistant."""
await setup_integration(hass, mock_config_entry)
# Add some pre-existing content from conversation.default_agent
mock_chat_log.async_add_user_content(
conversation.UserContent(content="What time is it?")
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id="conversation.gpt_3_5_turbo",
tool_calls=[
ToolInput(
tool_name="HassGetCurrentTime",
tool_args={},
id="mock_tool_call_id",
external=True,
)
],
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.ToolResultContent(
agent_id="conversation.gpt_3_5_turbo",
tool_call_id="mock_tool_call_id",
tool_name="HassGetCurrentTime",
tool_result={
"speech": {"plain": {"speech": "12:00 PM", "extra_data": None}},
"response_type": "action_done",
"speech_slots": {"time": datetime.time(12, 0)},
"data": {"targets": [], "success": [], "failed": []},
},
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id="conversation.gpt_3_5_turbo",
content="12:00 PM",
)
)
mock_chat_log.mock_tool_results(
{
"call_call_1": "value1",
@@ -95,34 +134,8 @@ async def test_function_call(
}
)
- async def completion_result(*args, messages, **kwargs):
- for message in messages:
- role = message["role"] if isinstance(message, dict) else message.role
- if role == "tool":
- return ChatCompletion(
- id="chatcmpl-1234567890ZYXWVUTSRQPONMLKJIH",
- choices=[
- Choice(
- finish_reason="stop",
- index=0,
- message=ChatCompletionMessage(
- content="I have successfully called the function",
- role="assistant",
- function_call=None,
- tool_calls=None,
- ),
- )
- ],
- created=1700000000,
- model="gpt-4-1106-preview",
- object="chat.completion",
- system_fingerprint=None,
- usage=CompletionUsage(
- completion_tokens=9, prompt_tokens=8, total_tokens=17
- ),
- )
- return ChatCompletion(
+ mock_openai_client.chat.completions.create.side_effect = (
+ ChatCompletion(
id="chatcmpl-1234567890ABCDEFGHIJKLMNOPQRS",
choices=[
Choice(
@@ -152,9 +165,30 @@ async def test_function_call(
usage=CompletionUsage(
completion_tokens=9, prompt_tokens=8, total_tokens=17
),
- )
- mock_openai_client.chat.completions.create = completion_result
+ ),
+ ChatCompletion(
+ id="chatcmpl-1234567890ZYXWVUTSRQPONMLKJIH",
+ choices=[
+ Choice(
+ finish_reason="stop",
+ index=0,
+ message=ChatCompletionMessage(
+ content="I have successfully called the function",
+ role="assistant",
+ function_call=None,
+ tool_calls=None,
+ ),
+ )
+ ],
+ created=1700000000,
+ model="gpt-4-1106-preview",
+ object="chat.completion",
+ system_fingerprint=None,
+ usage=CompletionUsage(
+ completion_tokens=9, prompt_tokens=8, total_tokens=17
+ ),
+ ),
+ )
result = await conversation.async_converse(
hass,
@@ -167,3 +201,8 @@ async def test_function_call(
assert result.response.response_type == intent.IntentResponseType.ACTION_DONE
# Don't test the prompt, as it's not deterministic
assert mock_chat_log.content[1:] == snapshot
+ assert mock_openai_client.chat.completions.create.call_count == 2
+ assert (
+ mock_openai_client.chat.completions.create.call_args.kwargs["messages"]
+ == snapshot
+ )

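The OpenRouter test above replaces the hand-rolled completion_result dispatcher with a side_effect sequence of two ChatCompletion objects, and the new assertions check the call count and the messages passed to the last call. For reference, the underlying unittest.mock behavior (a generic sketch, not PR code): a mock with an iterable side_effect returns the queued values one per call and raises StopIteration once they are exhausted.

from unittest.mock import AsyncMock

mock = AsyncMock(side_effect=("first", "second"))
# await mock() -> "first"
# await mock() -> "second"
# a third await mock() raises StopIteration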

@@ -7,14 +7,14 @@
'type': 'message',
}),
dict({
'arguments': '{"code": "import math\\nmath.sqrt(55555)", "container": "cntr_A"}',
'arguments': '{"code":"import math\\nmath.sqrt(55555)","container":"cntr_A"}',
'call_id': 'ci_A',
'name': 'code_interpreter',
'type': 'function_call',
}),
dict({
'call_id': 'ci_A',
'output': '{"output": [{"logs": "235.70108188126758\\n", "type": "logs"}]}',
'output': '{"output":[{"logs":"235.70108188126758\\n","type":"logs"}]}',
'type': 'function_call_output',
}),
dict({
@@ -36,6 +36,65 @@
# ---
# name: test_function_call
list([
dict({
'attachments': None,
'content': 'What time is it?',
'created': HAFakeDatetime(2025, 10, 31, 12, 0, tzinfo=datetime.timezone.utc),
'role': 'user',
}),
dict({
'agent_id': 'conversation.openai_conversation',
'content': None,
'created': HAFakeDatetime(2025, 10, 31, 12, 0, tzinfo=datetime.timezone.utc),
'native': None,
'role': 'assistant',
'thinking_content': None,
'tool_calls': list([
dict({
'external': True,
'id': 'mock-tool-call-id',
'tool_args': dict({
}),
'tool_name': 'HassGetCurrentTime',
}),
]),
}),
dict({
'agent_id': 'conversation.openai_conversation',
'created': HAFakeDatetime(2025, 10, 31, 12, 0, tzinfo=datetime.timezone.utc),
'role': 'tool_result',
'tool_call_id': 'mock-tool-call-id',
'tool_name': 'HassGetCurrentTime',
'tool_result': dict({
'data': dict({
'failed': list([
]),
'success': list([
]),
'targets': list([
]),
}),
'response_type': 'action_done',
'speech': dict({
'plain': dict({
'extra_data': None,
'speech': '12:00 PM',
}),
}),
'speech_slots': dict({
'time': datetime.time(12, 0),
}),
}),
}),
dict({
'agent_id': 'conversation.openai_conversation',
'content': '12:00 PM',
'created': HAFakeDatetime(2025, 10, 31, 12, 0, tzinfo=datetime.timezone.utc),
'native': None,
'role': 'assistant',
'thinking_content': None,
'tool_calls': None,
}),
dict({
'attachments': None,
'content': 'Please call the test function',
@@ -125,6 +184,27 @@
# ---
# name: test_function_call.1
list([
dict({
'content': 'What time is it?',
'role': 'user',
'type': 'message',
}),
dict({
'arguments': '{}',
'call_id': 'mock-tool-call-id',
'name': 'HassGetCurrentTime',
'type': 'function_call',
}),
dict({
'call_id': 'mock-tool-call-id',
'output': '{"speech":{"plain":{"speech":"12:00 PM","extra_data":null}},"response_type":"action_done","speech_slots":{"time":"12:00:00"},"data":{"targets":[],"success":[],"failed":[]}}',
'type': 'function_call_output',
}),
dict({
'content': '12:00 PM',
'role': 'assistant',
'type': 'message',
}),
dict({
'content': 'Please call the test function',
'role': 'user',
@@ -146,7 +226,7 @@
'type': 'reasoning',
}),
dict({
'arguments': '{"param1": "call1"}',
'arguments': '{"param1":"call1"}',
'call_id': 'call_call_1',
'name': 'test_tool',
'type': 'function_call',
@@ -157,7 +237,7 @@
'type': 'function_call_output',
}),
dict({
'arguments': '{"param1": "call2"}',
'arguments': '{"param1":"call2"}',
'call_id': 'call_call_2',
'name': 'test_tool',
'type': 'function_call',


@@ -1,5 +1,6 @@
"""Tests for the OpenAI integration."""
+ import datetime
from unittest.mock import AsyncMock, patch
from freezegun import freeze_time
@@ -30,6 +31,7 @@ from homeassistant.components.openai_conversation.const import (
from homeassistant.const import CONF_LLM_HASS_API
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import intent
+ from homeassistant.helpers.llm import ToolInput
from homeassistant.setup import async_setup_component
from . import (
@@ -251,6 +253,44 @@ async def test_function_call(
snapshot: SnapshotAssertion,
) -> None:
"""Test function call from the assistant."""
# Add some pre-existing content from conversation.default_agent
mock_chat_log.async_add_user_content(
conversation.UserContent(content="What time is it?")
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id="conversation.openai_conversation",
tool_calls=[
ToolInput(
tool_name="HassGetCurrentTime",
tool_args={},
id="mock-tool-call-id",
external=True,
)
],
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.ToolResultContent(
agent_id="conversation.openai_conversation",
tool_call_id="mock-tool-call-id",
tool_name="HassGetCurrentTime",
tool_result={
"speech": {"plain": {"speech": "12:00 PM", "extra_data": None}},
"response_type": "action_done",
"speech_slots": {"time": datetime.time(12, 0, 0, 0)},
"data": {"targets": [], "success": [], "failed": []},
},
)
)
mock_chat_log.async_add_assistant_content_without_tools(
conversation.AssistantContent(
agent_id="conversation.openai_conversation",
content="12:00 PM",
)
)
mock_create_stream.return_value = [
# Initial conversation
(


@@ -37,12 +37,18 @@ from homeassistant.const import (
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import issue_registry as ir
- from homeassistant.helpers.device_registry import DeviceRegistry, format_mac
+ from homeassistant.helpers.device_registry import (
+ CONNECTION_NETWORK_MAC,
+ DeviceRegistry,
+ format_mac,
+ )
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.setup import async_setup_component
from . import MOCK_MAC, init_integration, mutate_rpc_device_status, register_sub_device
+ from tests.common import MockConfigEntry
async def test_custom_coap_port(
hass: HomeAssistant, mock_block_device: Mock, caplog: pytest.LogCaptureFixture
@@ -126,7 +132,15 @@ async def test_shared_device_mac(
caplog: pytest.LogCaptureFixture,
) -> None:
"""Test first time shared device with another domain."""
+ other_entry = MockConfigEntry(domain="other_domain", unique_id=MOCK_MAC)
+ other_entry.add_to_hass(hass)
+ device_registry.async_get_or_create(
+ config_entry_id=other_entry.entry_id,
+ connections={(CONNECTION_NETWORK_MAC, MOCK_MAC)},
+ )
await init_integration(hass, gen, sleep_period=1000)
assert "Detected first time setup for device" in caplog.text
assert "will resume when device is online" in caplog.text