Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions custom_components/lock_code_manager/const.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,11 @@
# These create switch/binary_sensor entities but their states don't map to access control
EXCLUDED_CONDITION_PLATFORMS = frozenset({"scheduler"})

# Coordinator backoff: once a lock fails this many refreshes in a row, the
# coordinator starts exponentially backing off its polling interval.
BACKOFF_FAILURE_THRESHOLD: int = 3
# First backoff step, in seconds; doubles with each additional failure.
BACKOFF_INITIAL_SECONDS: int = 60
# Upper bound on the backed-off polling interval.
BACKOFF_MAX_SECONDS: int = 1800  # 30 minutes

# Defaults
DEFAULT_NUM_SLOTS = 3
DEFAULT_START = 1
Expand Down
67 changes: 64 additions & 3 deletions custom_components/lock_code_manager/coordinator.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,20 @@
from __future__ import annotations

from collections.abc import Callable
from datetime import datetime
from datetime import datetime, timedelta
import logging
from typing import TYPE_CHECKING, Any

from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed

from .const import DOMAIN
from .const import (
BACKOFF_FAILURE_THRESHOLD,
BACKOFF_INITIAL_SECONDS,
BACKOFF_MAX_SECONDS,
DOMAIN,
)
from .exceptions import LockCodeManagerError

if TYPE_CHECKING:
Expand Down Expand Up @@ -40,6 +45,8 @@ def __init__(self, hass: HomeAssistant, lock: BaseLock, config_entry: Any) -> No
config_entry=config_entry,
)
self.data: dict[int, int | str] = {}
self._consecutive_failures: int = 0
self._original_update_interval: timedelta | None = update_interval

# Set up drift detection timer for locks with hard_refresh_interval
if lock.hard_refresh_interval:
Expand Down Expand Up @@ -74,18 +81,64 @@ def push_update(self, updates: dict[int, int | str]) -> None:
if new_data == self.data:
return

# A successful push update proves the lock is reachable, so reset
# backoff to re-enable drift checks and normal polling.
self._reset_backoff()

self.async_set_updated_data(new_data)

def _apply_backoff(self) -> None:
    """Record a failed update and apply exponential polling backoff.

    Increments the consecutive-failure counter. Once the counter reaches
    ``BACKOFF_FAILURE_THRESHOLD``, the polling interval doubles for each
    additional failure, starting at ``BACKOFF_INITIAL_SECONDS`` and capped
    at ``BACKOFF_MAX_SECONDS``. Push-based locks (no configured polling
    interval) have nothing to slow down, so only a warning is logged; drift
    checks are suppressed separately while in backoff.
    """
    self._consecutive_failures += 1
    if self._consecutive_failures < BACKOFF_FAILURE_THRESHOLD:
        return

    if self._original_update_interval is None:
        # Push-based lock: no polling interval to back off.
        _LOGGER.warning(
            "Update failed %d consecutive times for %s, "
            "suppressing drift checks until recovery",
            self._consecutive_failures,
            self._lock.lock.entity_id,
        )
        return

    backoff_secs = min(
        BACKOFF_INITIAL_SECONDS
        * 2 ** (self._consecutive_failures - BACKOFF_FAILURE_THRESHOLD),
        BACKOFF_MAX_SECONDS,
    )
    new_interval = timedelta(seconds=backoff_secs)
    # The first backoff step can equal the configured scan interval, in
    # which case nothing is actually slowing down. Compare against the
    # interval we are currently polling at (falling back to the original)
    # so the warning is only emitted when the interval truly grows.
    previous_interval = self.update_interval or self._original_update_interval
    self.update_interval = new_interval
    if new_interval > previous_interval:
        _LOGGER.warning(
            "Update failed %d consecutive times for %s, "
            "backing off polling interval from %ds to %ds",
            self._consecutive_failures,
            self._lock.lock.entity_id,
            int(previous_interval.total_seconds()),
            backoff_secs,
        )

def _reset_backoff(self) -> None:
"""Reset failure counter and restore original update interval."""
if self._consecutive_failures > 0:
_LOGGER.info(
"Lock %s recovered after %d consecutive failures",
self._lock.lock.entity_id,
self._consecutive_failures,
)
self._consecutive_failures = 0
if self._original_update_interval is not None:
self.update_interval = self._original_update_interval

async def async_get_usercodes(self) -> dict[int, int | str]:
"""Update usercodes."""
try:
return await self._lock.async_internal_get_usercodes()
data = await self._lock.async_internal_get_usercodes()
except LockCodeManagerError as err:
self._apply_backoff()
# We can silently fail if we've never been able to retrieve data
if not self.last_update_success:
return {}
raise UpdateFailed from err

self._reset_backoff()
return data

async def _async_drift_check(self, now: datetime) -> None:
"""
Perform periodic drift detection.
Expand All @@ -98,6 +151,14 @@ async def _async_drift_check(self, now: datetime) -> None:
if not self.last_update_success:
return

if self._consecutive_failures >= BACKOFF_FAILURE_THRESHOLD:
_LOGGER.debug(
"Skipping drift check for %s (in backoff after %d failures)",
self._lock.lock.entity_id,
self._consecutive_failures,
)
return
Comment on lines +154 to +160
Copy link

Copilot AI Mar 2, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

_async_drift_check() skips drift checks when _consecutive_failures is above the threshold, but drift-check failures themselves never increment _consecutive_failures. For push-based locks (where update_interval=None), drift checks may be the only periodic lock I/O, so the system may never enter “backoff” and will keep attempting hard refreshes and logging warnings indefinitely when the lock is unreachable. Consider applying the same backoff accounting on hard-refresh failures (increment on LockCodeManagerError, reset on success), or track a separate failure counter for drift checks.

Copilot uses AI. Check for mistakes.

_LOGGER.debug(
"Performing drift detection hard refresh for %s",
self._lock.lock.entity_id,
Expand Down
Loading
Loading