Fix outdated R packages failing to fetch (#11039)

PR #10758 made a slight change to find_versions_of_archive() which included
archive_url in the search process. While this fixed `spack create` and
`spack checksum` when command-line arguments were missing, it caused `spack
install` to prefer those URLs over the ones it found by scraping.

As a result, the package's url was treated as a list_url, causing all R
packages to stop fetching as soon as the package was updated on CRAN.
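
For context: CRAN serves only the current release from a package's
top-level URL and moves superseded tarballs into a per-package Archive/
directory, so a url pinned to an old version goes stale the moment CRAN
publishes a new one. A sketch of that layout, with a hypothetical
package "foo" (names assumed for illustration; this mirrors how Spack's
R packages typically set url and list_url):

    url      = "https://cloud.r-project.org/src/contrib/foo_1.2.tar.gz"   # 404s once 1.3 is released
    list_url = "https://cloud.r-project.org/src/contrib/Archive/foo"      # still lists foo_1.2.tar.gz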

This patch is more selective about including the archive_url in the
remote versions: it is still added explicitly, but any matching version
found by the scraper overrides it.
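
A minimal standalone sketch of the intended merge order (the regex here
is only a stand-in for spack.url.parse_version, and the URLs are
hypothetical; compare the second hunk of the diff below):

    import re

    def merge_versions(archive_urls, scraped_links):
        # Walk archive_url links first; later dict assignments win, so any
        # version the scraper also finds overwrites the archive_url entry.
        versions = {}
        for url in archive_urls + sorted(scraped_links):
            m = re.search(r'_(\d+(?:\.\d+)*)\.tar\.gz$', url)
            if m:
                versions[m.group(1)] = url
        return versions

    print(merge_versions(
        ["https://cloud.r-project.org/src/contrib/foo_1.2.tar.gz"],
        ["https://cloud.r-project.org/src/contrib/Archive/foo/foo_1.2.tar.gz"]))
    # -> {'1.2': 'https://cloud.r-project.org/src/contrib/Archive/foo/foo_1.2.tar.gz'}
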
Justin S authored 2019-04-19 19:39:13 -05:00; committed by Peter Scheibel
parent 3b34931f68
commit 6f1fe3904c

@@ -304,9 +304,8 @@ def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
     list_urls.update(additional_list_urls)
 
     # Grab some web pages to scrape.
-    # Start with any links already given.
     pages = {}
-    links = set(archive_urls)
+    links = set()
     for lurl in list_urls:
         pg, lnk = spider(lurl, depth=list_depth)
         pages.update(pg)
@@ -345,8 +344,10 @@ def find_versions_of_archive(archive_urls, list_url=None, list_depth=0):
         regexes.append(url_regex)
 
     # Build a dict version -> URL from any links that match the wildcards.
+    # Walk through archive_url links first.
+    # Any conflicting versions will be overwritten by the list_url links.
     versions = {}
-    for url in sorted(links):
+    for url in archive_urls + sorted(links):
         if any(re.search(r, url) for r in regexes):
             try:
                 ver = spack.url.parse_version(url)
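
The ordering is what carries the fix: later assignments to the same key
win in a Python dict, so placing archive_urls at the front of the
iteration keeps versions known only from the package url while letting
any version the scraper also finds end up pointing at the scraped (live)
URL. With the patch, a call like the following (hypothetical CRAN
package; signature taken from the hunk header above) should again
resolve old versions through the Archive listing rather than the stale
top-level URL:

    versions = find_versions_of_archive(
        ["https://cloud.r-project.org/src/contrib/foo_1.2.tar.gz"],
        list_url="https://cloud.r-project.org/src/contrib/Archive/foo",
        list_depth=0)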