Fix Python 3 support in spack versions

- Add missing import, fixes spack versions in Python 2
- Fix spack versions in Python 3
parent b67e2db159
commit 28d6d375b4
2 changed files with 5 additions and 4 deletions
@@ -513,7 +513,7 @@ def wildcard_version(path):
     name_parts = re.split(name_re, path)

     # Even elements in the array did *not* match the name
-    for i in xrange(0, len(name_parts), 2):
+    for i in range(0, len(name_parts), 2):
         # Split each part by things that look like versions.
         vparts = re.split(v.wildcard(), name_parts[i])
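For context: xrange was removed in Python 3, while range exists in both major versions (on Python 2 it eagerly builds a list, which is harmless for the handful of pieces split out of a URL). A minimal sketch of the even-index pattern above, using a hypothetical parts list:

# re.split() with a capturing group puts the non-matching pieces at
# even indices and the captured matches at odd indices.
name_parts = ['', 'hdf5', '-1.8.13.tar.gz']   # hypothetical example

# range() works on Python 2 and 3; xrange() is Python 2 only.
for i in range(0, len(name_parts), 2):
    print(name_parts[i])   # prints '' and '-1.8.13.tar.gz'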
@@ -28,6 +28,7 @@

 from six.moves.urllib.request import urlopen, Request
 from six.moves.urllib.error import URLError
+from six.moves.urllib.parse import urljoin
 from multiprocessing import Pool

 try:
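six.moves hides the urllib reorganization between Python 2 and 3: urljoin lived in the urlparse module on Python 2 and moved to urllib.parse in Python 3. For reference, the equivalent conditional import without six:

try:
    from urllib.parse import urljoin   # Python 3 location
except ImportError:
    from urlparse import urljoin       # Python 2 location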
@@ -38,7 +39,7 @@
     from html.parser import HTMLParser

     # Also, HTMLParseError is deprecated and never raised.
-    class HTMLParseError:
+    class HTMLParseError(Exception):
         pass

 import llnl.util.tty as tty
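The added base class matters because Python 3 only allows raising and catching classes that derive from BaseException; a bare placeholder class would make any except clause naming it fail. A minimal demonstration:

class HTMLParseError(Exception):
    # Placeholder: html.parser in Python 3 no longer raises this,
    # but existing except clauses still reference the name.
    pass

try:
    raise HTMLParseError('bad markup')
except HTMLParseError as e:
    # Without the Exception base, both the raise and this except
    # clause would fail with a TypeError on Python 3.
    print('handled:', e)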
@@ -110,7 +111,7 @@ def _spider(args):
         response_url = response.geturl()

         # Read the page and and stick it in the map we'll return
-        page = response.read()
+        page = response.read().decode('utf-8')
         pages[response_url] = page

         # Parse out the links in the page
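On Python 3, urlopen(...).read() returns bytes, and HTMLParser.feed() only accepts str, so the body must be decoded before the links can be parsed out. A sketch of the distinction, assuming UTF-8 pages as the commit does:

from six.moves.urllib.request import urlopen

response = urlopen('http://example.com')   # hypothetical URL
page = response.read()        # bytes on Python 3, str on Python 2
page = page.decode('utf-8')   # str on both; assumes UTF-8 content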
@@ -120,7 +121,7 @@ def _spider(args):

         while link_parser.links:
             raw_link = link_parser.links.pop()
-            abs_link = urlparse.urljoin(response_url, raw_link.strip())
+            abs_link = urljoin(response_url, raw_link.strip())

             links.add(abs_link)
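The old line went through the Python 2 urlparse module, which no longer exists under that name in Python 3; the new code uses the urljoin imported via six.moves in the hunk at line 28 above. For reference, urljoin resolves a scraped relative link against the URL the response actually came from:

from six.moves.urllib.parse import urljoin

base = 'http://example.com/pub/releases/'   # hypothetical base URL
print(urljoin(base, 'foo-1.0.tar.gz'))   # http://example.com/pub/releases/foo-1.0.tar.gz
print(urljoin(base, '/other/file'))      # http://example.com/other/file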