BinaryCacheIndex: track update failures with cooldown (#33509)

#32137 added an option to update() a BinaryCacheIndex with a
cooldown: repeated attempts within this cooldown would not
actually retry. However, the cooldown was not tracked for
failures, which are common when the mirror does not store any
binaries and therefore has no index.json.

This commit ensures that update(..., with_cooldown=True) also
skips the update when the previous fetch attempt failed within
the cooldown period, re-reporting the failure instead of retrying.
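
For illustration only, the bookkeeping after this change amounts to
something like the sketch below. CooldownTracker and its method names
are hypothetical stand-ins, not Spack's actual API; only the
_last_fetch_times mapping, the ttl cooldown, and the
(timestamp, success) tuples mirror the real change.

import time

class CooldownTracker:
    """Hypothetical sketch of the per-mirror fetch bookkeeping."""

    def __init__(self, ttl):
        self.ttl = ttl  # cooldown period in seconds (0 disables the cooldown)
        # mirror url -> (time.time() of the last fetch, whether it succeeded)
        self._last_fetch_times = {}

    def in_cooldown(self, url, now=None):
        """Return (in_cooldown, last_fetch_succeeded) for a mirror url."""
        now = time.time() if now is None else now
        if self.ttl > 0 and url in self._last_fetch_times:
            last_time, succeeded = self._last_fetch_times[url]
            if now - last_time < self.ttl:
                return True, succeeded
        return False, False

    def record(self, url, succeeded, now=None):
        # Record failed fetches too, so a mirror with no index.json is not
        # re-fetched on every update() call during the cooldown period.
        now = time.time() if now is None else now
        self._last_fetch_times[url] = (now, succeeded)
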
Authored by Jonathon Anderson on 2022-10-25 13:36:12 -05:00, committed by GitHub
parent d2e75045b8
commit 9d5151ba25

@@ -107,7 +107,8 @@ def __init__(self, cache_root):
         # cache (_mirrors_for_spec)
         self._specs_already_associated = set()
-        # mapping from mirror urls to the time.time() of the last index fetch.
+        # mapping from mirror urls to the time.time() of the last index fetch and a bool indicating
+        # whether the fetch succeeded or not.
         self._last_fetch_times = {}
         # _mirrors_for_spec is a dictionary mapping DAG hashes to lists of
@@ -346,21 +347,25 @@ def update(self, with_cooldown=False):
                     with_cooldown
                     and ttl > 0
                     and cached_mirror_url in self._last_fetch_times
-                    and now - self._last_fetch_times[cached_mirror_url] < ttl
+                    and now - self._last_fetch_times[cached_mirror_url][0] < ttl
                 ):
-                    # The fetch worked last time, so don't error
-                    all_methods_failed = False
+                    # We're in the cooldown period, don't try to fetch again
+                    # If the fetch succeeded last time, consider this update a success, otherwise
+                    # re-report the error here
+                    if self._last_fetch_times[cached_mirror_url][1]:
+                        all_methods_failed = False
                 else:
                     # May need to fetch the index and update the local caches
                     try:
                         needs_regen = self._fetch_and_cache_index(
                             cached_mirror_url, expect_hash=cached_index_hash
                         )
-                        self._last_fetch_times[cached_mirror_url] = now
+                        self._last_fetch_times[cached_mirror_url] = (now, True)
                         all_methods_failed = False
                     except FetchCacheError as fetch_error:
                         needs_regen = False
                         fetch_errors.extend(fetch_error.errors)
+                        self._last_fetch_times[cached_mirror_url] = (now, False)
                     # The need to regenerate implies a need to clear as well.
                     spec_cache_clear_needed |= needs_regen
                     spec_cache_regenerate_needed |= needs_regen
@@ -392,11 +397,12 @@ def update(self, with_cooldown=False):
             # Need to fetch the index and update the local caches
             try:
                 needs_regen = self._fetch_and_cache_index(mirror_url)
-                self._last_fetch_times[mirror_url] = now
+                self._last_fetch_times[mirror_url] = (now, True)
                 all_methods_failed = False
             except FetchCacheError as fetch_error:
                 fetch_errors.extend(fetch_error.errors)
                 needs_regen = False
+                self._last_fetch_times[mirror_url] = (now, False)
             # Generally speaking, a new mirror wouldn't imply the need to
             # clear the spec cache, so leave it as is.
             if needs_regen:
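
As a rough usage illustration of the behavior above, again using the
hypothetical CooldownTracker sketch rather than Spack's BinaryCacheIndex
itself: a failed fetch now also starts the cooldown, and the earlier
failure is re-reported on the next attempt instead of being treated as
a success.

tracker = CooldownTracker(ttl=600)
url = "https://mirror.example.com/build_cache"  # hypothetical mirror url

# First attempt: not in cooldown yet, so the caller fetches, fails, and
# records the failure (the part that was previously missing).
skip, ok = tracker.in_cooldown(url, now=0.0)
assert not skip
tracker.record(url, succeeded=False, now=0.0)

# Second attempt 10s later: inside the cooldown, so no re-fetch, and the
# earlier failure keeps all_methods_failed True instead of passing silently.
skip, ok = tracker.in_cooldown(url, now=10.0)
assert skip and not ok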