Speed up sql queries where ORM rows are not needed (#91839)

* Speed up logbook and history queries where ORM rows are not needed

This avoids having SQLAlchemy wrap the Result in a ChunkedIteratorResult,
which adds overhead we do not need for these cases (a short sketch of the difference follows this list)

* more places

* Anything that uses _sorted_statistics_to_dict does not need ORM rows either
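
For context, the two execution paths differ only in whether SQLAlchemy's ORM result wrapping is involved. A minimal sketch of the difference (using a hypothetical in-memory engine and query, not code from this repository):

```python
# Minimal sketch (not from the Home Assistant codebase): the engine and query
# below are assumptions used only to illustrate ORM-level vs. Core-level
# execution in SQLAlchemy.
from sqlalchemy import create_engine, text
from sqlalchemy.orm import Session

engine = create_engine("sqlite://")  # hypothetical in-memory database

with Session(engine) as session:
    stmt = text("SELECT 1 AS value")

    # ORM-level execution: the ORM wraps the result (e.g. ChunkedIteratorResult)
    # so it can return ORM entities, which adds overhead when only raw
    # column values are needed.
    orm_rows = session.execute(stmt).all()

    # Core-level execution on the session's connection: returns plain Row
    # objects without the ORM wrapping.
    core_rows = session.connection().execute(stmt).all()
```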
Author: J. Nick Koston
Date: 2023-04-21 22:28:07 -05:00
Committed by: GitHub
Parent: 2663901603
Commit: 95fcdc5684
10 changed files with 45 additions and 35 deletions


@@ -199,6 +199,7 @@ def execute_stmt_lambda_element(
     start_time: datetime | None = None,
     end_time: datetime | None = None,
     yield_per: int = DEFAULT_YIELD_STATES_ROWS,
+    orm_rows: bool = True,
 ) -> Sequence[Row] | Result:
     """Execute a StatementLambdaElement.
@@ -211,10 +212,13 @@ def execute_stmt_lambda_element(
     specific entities) since they are usually faster
     with .all().
     """
-    executed = session.execute(stmt)
     use_all = not start_time or ((end_time or dt_util.utcnow()) - start_time).days <= 1
     for tryno in range(RETRIES):
         try:
+            if orm_rows:
+                executed = session.execute(stmt)
+            else:
+                executed = session.connection().execute(stmt)
             if use_all:
                 return executed.all()
             return executed.yield_per(yield_per)
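
Callers that only consume plain column data (such as the history, logbook, and statistics helpers mentioned in the commit message) can now opt out of ORM rows. A hypothetical call might look like the following; the surrounding variables are assumptions for illustration:

```python
# Hypothetical caller: only column values are needed, so skip ORM row wrapping.
rows = execute_stmt_lambda_element(
    session,
    stmt,            # a StatementLambdaElement built elsewhere
    start_time,
    end_time,
    orm_rows=False,  # execute on session.connection() instead of the ORM session
)
for row in rows:
    ...  # lightweight Core Row objects
```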