Merge pull request #1015 from LLNL/features/faster-virtuals

Faster virtuals and concretization
This commit is contained in:
Todd Gamblin 2016-08-09 10:33:40 -07:00 committed by GitHub
commit a095fd517f
25 changed files with 1348 additions and 469 deletions

.gitignore vendored
View file

@ -1,5 +1,7 @@
/var/spack/stage
/var/spack/cache
/var/spack/repos/*/index.yaml
/var/spack/repos/*/lock
*.pyc
/opt
*~

View file

@ -28,6 +28,9 @@
import time
import socket
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
'LockError']
# Default timeout in seconds, after which locks will raise exceptions.
_default_timeout = 60
@ -36,13 +39,20 @@
class Lock(object):
def __init__(self,file_path):
"""This is an implementation of a filesystem lock using Python's lockf.
In Python, `lockf` actually calls `fcntl`, so this should work with any
filesystem implementation that supports locking through the fcntl calls.
This includes distributed filesystems like Lustre (when flock is enabled)
and recent NFS versions.
"""
def __init__(self, file_path):
self._file_path = file_path
self._fd = None
self._reads = 0
self._writes = 0
def _lock(self, op, timeout):
"""This takes a lock using POSIX locks (``fnctl.lockf``).
@ -63,7 +73,9 @@ def _lock(self, op, timeout):
fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
if op == fcntl.LOCK_EX:
os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
os.write(
self._fd,
"pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
return
except IOError as error:
@ -75,7 +87,6 @@ def _lock(self, op, timeout):
raise LockError("Timed out waiting for lock.")
def _unlock(self):
"""Releases a lock using POSIX locks (``fcntl.lockf``)
@ -83,11 +94,10 @@ def _unlock(self):
be masquerading as write locks, but this removes either.
"""
fcntl.lockf(self._fd,fcntl.LOCK_UN)
fcntl.lockf(self._fd, fcntl.LOCK_UN)
os.close(self._fd)
self._fd = None
def acquire_read(self, timeout=_default_timeout):
"""Acquires a recursive, shared lock for reading.
@ -107,7 +117,6 @@ def acquire_read(self, timeout=_default_timeout):
self._reads += 1
return False
def acquire_write(self, timeout=_default_timeout):
"""Acquires a recursive, exclusive lock for writing.
@ -127,7 +136,6 @@ def acquire_write(self, timeout=_default_timeout):
self._writes += 1
return False
def release_read(self):
"""Releases a read lock.
@ -148,7 +156,6 @@ def release_read(self):
self._reads -= 1
return False
def release_write(self):
"""Releases a write lock.
@ -170,6 +177,68 @@ def release_write(self):
return False
class LockTransaction(object):
"""Simple nested transaction context manager that uses a file lock.
This class can trigger actions when the lock is acquired for the
first time and released for the last.
If the acquire_fn returns a value, it is used as the return value for
__enter__, allowing it to be passed as the `as` argument of a `with`
statement.
If acquire_fn returns a context manager, *its* `__enter__` function will be
called in `__enter__` after acquire_fn, and its `__exit__` function will be
called before `release_fn` in `__exit__`, allowing you to nest a context
manager to be used along with the lock.
Timeout for lock is customizable.
"""
def __init__(self, lock, acquire_fn=None, release_fn=None,
timeout=_default_timeout):
self._lock = lock
self._timeout = timeout
self._acquire_fn = acquire_fn
self._release_fn = release_fn
self._as = None
def __enter__(self):
if self._enter() and self._acquire_fn:
self._as = self._acquire_fn()
if hasattr(self._as, '__enter__'):
return self._as.__enter__()
else:
return self._as
def __exit__(self, type, value, traceback):
suppress = False
if self._exit():
if self._as and hasattr(self._as, '__exit__'):
if self._as.__exit__(type, value, traceback):
suppress = True
if self._release_fn:
if self._release_fn(type, value, traceback):
suppress = True
return suppress
class ReadTransaction(LockTransaction):
def _enter(self):
return self._lock.acquire_read(self._timeout)
def _exit(self):
return self._lock.release_read()
class WriteTransaction(LockTransaction):
def _enter(self):
return self._lock.acquire_write(self._timeout)
def _exit(self):
return self._lock.release_write()
class LockError(Exception):
"""Raised when an attempt to acquire a lock times out."""
pass
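
To make the transaction semantics concrete, here is a minimal usage sketch (not part of the diff; the lock file path is hypothetical and must already exist, since Lock opens it read-write). Because the lock is recursive, acquire_fn and release_fn fire only on the outermost enter and exit:

    from llnl.util.lock import Lock, ReadTransaction

    lock = Lock('/tmp/example.lock')  # hypothetical, pre-created lock file

    def refresh():
        # Runs only when the lock is acquired for the first time.
        print("re-reading shared state")

    with ReadTransaction(lock, acquire_fn=refresh):
        with ReadTransaction(lock, acquire_fn=refresh):
            pass  # nested transaction: refresh() is not called again
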

View file

@ -1,3 +1,4 @@
# flake8: noqa
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
@ -50,8 +51,15 @@
share_path = join_path(spack_root, "share", "spack")
cache_path = join_path(var_path, "cache")
# User configuration location
user_config_path = os.path.expanduser('~/.spack')
import spack.fetch_strategy
cache = spack.fetch_strategy.FsCache(cache_path)
fetch_cache = spack.fetch_strategy.FsCache(cache_path)
from spack.file_cache import FileCache
user_cache_path = join_path(user_config_path, 'cache')
user_cache = FileCache(user_cache_path)
prefix = spack_root
opt_path = join_path(prefix, "opt")
@ -140,7 +148,7 @@
_tmp_candidates = (_default_tmp, '/nfs/tmp2', '/tmp', '/var/tmp')
for path in _tmp_candidates:
# don't add a second username if it's already unique by user.
if not _tmp_user in path:
if _tmp_user not in path:
tmp_dirs.append(join_path(path, '%u', 'spack-stage'))
else:
tmp_dirs.append(join_path(path, 'spack-stage'))
@ -172,12 +180,13 @@
# Spack internal code should call 'import spack' and accesses other
# variables (spack.repo, paths, etc.) directly.
#
# TODO: maybe this should be separated out and should go in build_environment.py?
# TODO: it's not clear where all the stuff that needs to be included in packages
# should live. This file is overloaded for spack core vs. for packages.
# TODO: maybe this should be separated out to build_environment.py?
# TODO: it's not clear where all the stuff that needs to be included in
# packages should live. This file is overloaded for spack core vs.
# for packages.
#
__all__ = ['Package', 'StagedPackage', 'CMakePackage', \
'Version', 'when', 'ver', 'alldeps', 'nolink']
__all__ = ['Package', 'StagedPackage', 'CMakePackage',
'Version', 'when', 'ver', 'alldeps', 'nolink']
from spack.package import Package, ExtensionConflictError
from spack.package import StagedPackage, CMakePackage
from spack.version import Version, ver
@ -197,8 +206,8 @@
__all__ += spack.util.executable.__all__
from spack.package import \
install_dependency_symlinks, flatten_dependencies, DependencyConflictError, \
InstallError, ExternalPackageError
install_dependency_symlinks, flatten_dependencies, \
DependencyConflictError, InstallError, ExternalPackageError
__all__ += [
'install_dependency_symlinks', 'flatten_dependencies', 'DependencyConflictError',
'InstallError', 'ExternalPackageError']
'install_dependency_symlinks', 'flatten_dependencies',
'DependencyConflictError', 'InstallError', 'ExternalPackageError']

View file

@ -383,6 +383,13 @@ def __str__(self):
def __contains__(self, string):
return string in str(self)
# TODO: make this unnecessary: don't include an empty arch on *every* spec.
def __nonzero__(self):
return (self.platform is not None or
self.platform_os is not None or
self.target is not None)
__bool__ = __nonzero__
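
A quick illustration of the truthiness this adds (the new unit test later in this diff exercises the same behavior; assumes spack.architecture is importable):

    import spack.architecture

    arch = spack.architecture.Arch()
    assert not arch                                # empty arch is falsey
    arch.platform = spack.architecture.platform()  # set any one field
    assert arch                                    # now it is truthy
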
def _cmp_key(self):
if isinstance(self.platform, Platform):
platform = self.platform.name

View file

@ -33,7 +33,11 @@ def setup_parser(subparser):
'-s', '--stage', action='store_true', default=True,
help="Remove all temporary build stages (default).")
subparser.add_argument(
'-c', '--cache', action='store_true', help="Remove cached downloads.")
'-d', '--downloads', action='store_true',
help="Remove cached downloads.")
subparser.add_argument(
'-u', '--user-cache', action='store_true',
help="Remove caches in user home directory. Includes virtual indices.")
subparser.add_argument(
'-a', '--all', action='store_true',
help="Remove all of the above.")
@ -41,12 +45,14 @@ def setup_parser(subparser):
def purge(parser, args):
# Special case: no flags.
if not any((args.stage, args.cache, args.all)):
if not any((args.stage, args.downloads, args.user_cache, args.all)):
stage.purge()
return
# handle other flags with fall through.
if args.stage or args.all:
stage.purge()
if args.cache or args.all:
spack.cache.destroy()
if args.downloads or args.all:
spack.fetch_cache.destroy()
if args.user_cache or args.all:
spack.user_cache.destroy()
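
Based on the parser above, typical invocations would look like:

    spack purge                # default: temporary build stages only
    spack purge --downloads    # cached downloads
    spack purge --user-cache   # user caches, including virtual indices
    spack purge --all          # all of the above
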

View file

@ -23,28 +23,28 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
from pprint import pprint
from llnl.util.filesystem import join_path, mkdirp
from llnl.util.tty.colify import colify
from llnl.util.lang import list_modules
import spack
import spack.test
from spack.fetch_strategy import FetchError
description ="Run unit tests"
description = "Run unit tests"
def setup_parser(subparser):
subparser.add_argument(
'names', nargs='*', help="Names of tests to run.")
subparser.add_argument(
'-l', '--list', action='store_true', dest='list', help="Show available tests")
'-l', '--list', action='store_true', dest='list',
help="Show available tests")
subparser.add_argument(
'--createXmlOutput', action='store_true', dest='createXmlOutput',
'--createXmlOutput', action='store_true', dest='createXmlOutput',
help="Create JUnit XML from test results")
subparser.add_argument(
'--xmlOutputDir', dest='xmlOutputDir',
'--xmlOutputDir', dest='xmlOutputDir',
help="Nose creates XML files in this directory")
subparser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose',
@ -62,13 +62,14 @@ def fetcher(self, targetPath, digest):
class MockCacheFetcher(object):
def set_stage(self, stage):
pass
def fetch(self):
raise FetchError("Mock cache always fails for tests")
def __str__(self):
return "[mock fetcher]"
def test(parser, args):
if args.list:
print "Available tests:"
@ -82,8 +83,8 @@ def test(parser, args):
outputDir = join_path(os.getcwd(), "test-output")
else:
outputDir = os.path.abspath(args.xmlOutputDir)
if not os.path.exists(outputDir):
mkdirp(outputDir)
spack.cache = MockCache()
spack.fetch_cache = MockCache()
spack.test.run(args.names, outputDir, args.verbose)

View file

@ -184,7 +184,8 @@ def uninstall(parser, args):
uninstall_list = list(set(uninstall_list))
if has_error:
tty.die('You can use spack uninstall --dependents to uninstall these dependencies as well') # NOQA: ignore=E501
tty.die('You can use spack uninstall --dependents '
'to uninstall these dependencies as well')
if not args.yes_to_all:
tty.msg("The following packages will be uninstalled : ")

View file

@ -525,7 +525,7 @@ def clear(self):
ConfigScope('site', os.path.join(spack.etc_path, 'spack'))
"""User configuration can override both spack defaults and site config."""
ConfigScope('user', os.path.expanduser('~/.spack'))
ConfigScope('user', spack.user_config_path)
def highest_precedence_scope():

View file

@ -165,11 +165,11 @@ def __init__(self, root, db_dir=None):
def write_transaction(self, timeout=_db_lock_timeout):
"""Get a write lock context manager for use in a `with` block."""
return WriteTransaction(self, self._read, self._write, timeout)
return WriteTransaction(self.lock, self._read, self._write, timeout)
def read_transaction(self, timeout=_db_lock_timeout):
"""Get a read lock context manager for use in a `with` block."""
return ReadTransaction(self, self._read, None, timeout)
return ReadTransaction(self.lock, self._read, timeout=timeout)
def _write_to_yaml(self, stream):
"""Write out the databsae to a YAML file.
@ -352,12 +352,22 @@ def _check_ref_counts(self):
"Invalid ref_count: %s: %d (expected %d), in DB %s" %
(key, found, expected, self._index_path))
def _write(self):
def _write(self, type, value, traceback):
"""Write the in-memory database index to its file path.
Does no locking.
This is a helper function called by the WriteTransaction context
manager. If there is an exception while the write lock is active,
nothing will be written to the database file, but the in-memory
database *may* be left in an inconsistent state. It will be consistent
after the start of the next transaction, when it is read from disk again.
This routine does no locking.
"""
# Do not write if exceptions were raised
if type is not None:
return
temp_file = self._index_path + (
'.%s.%s.temp' % (socket.getfqdn(), os.getpid()))
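
A sketch of the resulting behavior (hypothetical db object and helper): the transactions now wrap db.lock directly, and a block that raises leaves the on-disk index untouched:

    try:
        with db.write_transaction():  # acquires db.lock, then runs db._read()
            mutate_in_memory(db)      # hypothetical in-memory change
            raise RuntimeError('oops')
    except RuntimeError:
        # _write(type, value, traceback) saw the exception and skipped
        # writing, so the database file on disk is unchanged.
        pass
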
@ -589,49 +599,6 @@ def missing(self, spec):
return key in self._data and not self._data[key].installed
class _Transaction(object):
"""Simple nested transaction context manager that uses a file lock.
This class can trigger actions when the lock is acquired for the
first time and released for the last.
Timeout for lock is customizable.
"""
def __init__(self, db,
acquire_fn=None,
release_fn=None,
timeout=_db_lock_timeout):
self._db = db
self._timeout = timeout
self._acquire_fn = acquire_fn
self._release_fn = release_fn
def __enter__(self):
if self._enter() and self._acquire_fn:
self._acquire_fn()
def __exit__(self, type, value, traceback):
if self._exit() and self._release_fn:
self._release_fn()
class ReadTransaction(_Transaction):
def _enter(self):
return self._db.lock.acquire_read(self._timeout)
def _exit(self):
return self._db.lock.release_read()
class WriteTransaction(_Transaction):
def _enter(self):
return self._db.lock.acquire_write(self._timeout)
def _exit(self):
return self._db.lock.release_write()
class CorruptDatabaseError(SpackError):
def __init__(self, path, msg=''):
super(CorruptDatabaseError, self).__init__(

View file

@ -0,0 +1,183 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import shutil
from llnl.util.filesystem import *
from llnl.util.lock import *
from spack.error import SpackError
class FileCache(object):
"""This class manages cached data in the filesystem.
- Cache files are fetched and stored by unique keys. Keys can be relative
paths, so that there can be some hierarchy in the cache.
- The FileCache handles locking cache files for reading and writing, so
client code need not manage locks for cache entries.
"""
def __init__(self, root):
"""Create a file cache object.
This will create the cache directory if it does not exist yet.
"""
self.root = root.rstrip(os.path.sep)
if not os.path.exists(self.root):
mkdirp(self.root)
self._locks = {}
def destroy(self):
"""Remove all files under the cache root."""
for f in os.listdir(self.root):
path = join_path(self.root, f)
if os.path.isdir(path):
shutil.rmtree(path, True)
else:
os.remove(path)
def cache_path(self, key):
"""Path to the file in the cache for a particular key."""
return join_path(self.root, key)
def _lock_path(self, key):
"""Path to the file in the cache for a particular key."""
keyfile = os.path.basename(key)
keydir = os.path.dirname(key)
return join_path(self.root, keydir, '.' + keyfile + '.lock')
def _get_lock(self, key):
"""Create a lock for a key, if necessary, and return a lock object."""
if key not in self._locks:
lock_file = self._lock_path(key)
if not os.path.exists(lock_file):
touch(lock_file)
self._locks[key] = Lock(lock_file)
return self._locks[key]
def init_entry(self, key):
"""Ensure we can access a cache file. Create a lock for it if needed.
Return whether the cache file exists yet or not.
"""
cache_path = self.cache_path(key)
exists = os.path.exists(cache_path)
if exists:
if not os.path.isfile(cache_path):
raise CacheError("Cache file is not a file: %s" % cache_path)
if not os.access(cache_path, os.R_OK | os.W_OK):
raise CacheError("Cannot access cache file: %s" % cache_path)
else:
# if the file is hierarchical, make parent directories
parent = os.path.dirname(cache_path)
if parent.rstrip(os.path.sep) != self.root:
mkdirp(parent)
if not os.access(parent, os.R_OK | os.W_OK):
raise CacheError("Cannot access cache directory: %s" % parent)
# ensure lock is created for this key
self._get_lock(key)
return exists
def read_transaction(self, key):
"""Get a read transaction on a file cache item.
Returns a ReadTransaction context manager and opens the cache file for
reading. You can use it like this:
with spack.user_cache.read_transaction(key) as cache_file:
cache_file.read()
"""
return ReadTransaction(
self._get_lock(key), lambda: open(self.cache_path(key)))
def write_transaction(self, key):
"""Get a write transaction on a file cache item.
Returns a WriteTransaction context manager that opens a temporary file
for writing. Once the context manager finishes, if nothing went wrong,
moves the file into place on top of the old file atomically.
"""
class WriteContextManager(object):
def __enter__(cm):
cm.orig_filename = self.cache_path(key)
cm.orig_file = None
if os.path.exists(cm.orig_filename):
cm.orig_file = open(cm.orig_filename, 'r')
cm.tmp_filename = self.cache_path(key) + '.tmp'
cm.tmp_file = open(cm.tmp_filename, 'w')
return cm.orig_file, cm.tmp_file
def __exit__(cm, type, value, traceback):
if cm.orig_file:
cm.orig_file.close()
cm.tmp_file.close()
if value:
# remove tmp on exception & raise it
shutil.rmtree(cm.tmp_filename, True)
raise value
else:
os.rename(cm.tmp_filename, cm.orig_filename)
return WriteTransaction(self._get_lock(key), WriteContextManager)
def mtime(self, key):
"""Return modification time of cache file, or 0 if it does not exist.
Time is in units returned by os.stat in the mtime field, which is
platform-dependent.
"""
if not self.init_entry(key):
return 0
else:
sinfo = os.stat(self.cache_path(key))
return sinfo.st_mtime
def remove(self, key):
lock = self._get_lock(key)
try:
lock.acquire_write()
os.unlink(self.cache_path(key))
finally:
lock.release_write()
os.unlink(self._lock_path(key))
class CacheError(SpackError):
pass
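
A hypothetical end-to-end sketch of the FileCache API (root and key invented). init_entry creates parent directories and the lock file for hierarchical keys; write_transaction yields the old file (or None) plus a temp file that replaces the original atomically on success:

    from spack.file_cache import FileCache

    cache = FileCache('/tmp/example-cache')  # hypothetical cache root
    key = 'providers/demo-index.yaml'        # hypothetical hierarchical key

    cache.init_entry(key)  # make parent dirs and the hidden .lock file

    with cache.write_transaction(key) as (old, new):
        if old:
            old.read()          # previous contents, if the entry existed
        new.write('demo: 1\n')  # lands atomically via os.rename on success

    with cache.read_transaction(key) as f:
        print(f.read())
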

View file

@ -520,7 +520,8 @@ def header(self):
def prerequisite(self, spec):
tty.warn('prerequisites: not supported by dotkit module files')
tty.warn('\tYou may want to check ~/.spack/modules.yaml')
tty.warn('\tYou may want to check %s/modules.yaml'
% spack.user_config_path)
return ''

View file

@ -630,50 +630,12 @@ def activated(self):
exts = spack.install_layout.extension_map(self.extendee_spec)
return (self.name in exts) and (exts[self.name] == self.spec)
def preorder_traversal(self, visited=None, **kwargs):
"""This does a preorder traversal of the package's dependence DAG."""
virtual = kwargs.get("virtual", False)
if visited is None:
visited = set()
if self.name in visited:
return
visited.add(self.name)
if not virtual:
yield self
for name in sorted(self.dependencies.keys()):
dep_spec = self.get_dependency(name)
spec = dep_spec.spec
# Currently, we do not descend into virtual dependencies, as this
# makes doing a sensible traversal much harder. We just assume
# that ANY of the virtual deps will work, which might not be true
# (due to conflicts or unsatisfiable specs). For now this is ok,
# but we might want to reinvestigate if we start using a lot of
# complicated virtual dependencies
# TODO: reinvestigate this.
if spec.virtual:
if virtual:
yield spec
continue
for pkg in spack.repo.get(name).preorder_traversal(visited,
**kwargs):
yield pkg
def provides(self, vpkg_name):
"""
True if this package provides a virtual package with the specified name
"""
return any(s.name == vpkg_name for s in self.provided)
def virtual_dependencies(self, visited=None):
for spec in sorted(set(self.preorder_traversal(virtual=True))):
yield spec
@property
def installed(self):
return os.path.isdir(self.prefix)
@ -1236,7 +1198,15 @@ def install(self, spec, prefix):
def do_uninstall(self, force=False):
if not self.installed:
raise InstallError(str(self.spec) + " is not installed.")
# prefix may not exist, but DB may be inconsistent. Try to fix by
# removing, but omit hooks.
specs = spack.installed_db.query(self.spec, installed=True)
if specs:
spack.installed_db.remove(specs[0])
tty.msg("Removed stale DB entry for %s" % self.spec.short_spec)
return
else:
raise InstallError(str(self.spec) + " is not installed.")
if not force:
dependents = self.installed_dependents
@ -1446,6 +1416,7 @@ def use_cray_compiler_names():
os.environ['FC'] = 'ftn'
os.environ['F77'] = 'ftn'
def flatten_dependencies(spec, flat_dir):
"""Make each dependency of spec present in dir via symlink."""
for dep in spec.traverse(root=False):

View file

@ -25,8 +25,14 @@
"""
The ``virtual`` module contains utility classes for virtual dependencies.
"""
import spack.spec
import itertools
from itertools import product as iproduct
from pprint import pformat
import yaml
from yaml.error import MarkedYAMLError
import spack
class ProviderIndex(object):
"""This is a dict of dicts used for finding providers of particular
@ -44,13 +50,29 @@ class ProviderIndex(object):
Calling providers_for(spec) will find specs that provide a
matching implementation of MPI.
"""
def __init__(self, specs, **kwargs):
# TODO: come up with another name for this. This "restricts" values to
the verbatim input specs (i.e., it doesn't pre-apply package's constraints, and
# keeps things as broad as possible, so it's really the wrong name)
self.restrict = kwargs.setdefault('restrict', False)
"""
def __init__(self, specs=None, restrict=False):
"""Create a new ProviderIndex.
Optional arguments:
specs
List (or sequence) of specs. If provided, will call
`update` on this ProviderIndex with each spec in the list.
restrict
"restricts" values to the verbatim input specs; do not
pre-apply package's constraints.
TODO: rename this. It is intended to keep things as broad
as possible without overly restricting results, so it is
not the best name.
"""
if specs is None:
specs = []
self.restrict = restrict
self.providers = {}
for spec in specs:
@ -62,9 +84,8 @@ def __init__(self, specs, **kwargs):
self.update(spec)
def update(self, spec):
if type(spec) != spack.spec.Spec:
if not isinstance(spec, spack.spec.Spec):
spec = spack.spec.Spec(spec)
if not spec.name:
@ -75,12 +96,13 @@ def update(self, spec):
pkg = spec.package
for provided_spec, provider_spec in pkg.provided.iteritems():
provider_spec.compiler_flags = spec.compiler_flags.copy()#We want satisfaction other than flags
# We want satisfaction other than flags
provider_spec.compiler_flags = spec.compiler_flags.copy()
if provider_spec.satisfies(spec, deps=False):
provided_name = provided_spec.name
provider_map = self.providers.setdefault(provided_name, {})
if not provided_spec in provider_map:
if provided_spec not in provider_map:
provider_map[provided_spec] = set()
if self.restrict:
@ -102,7 +124,6 @@ def update(self, spec):
constrained.constrain(provider_spec)
provider_map[provided_spec].add(constrained)
def providers_for(self, *vpkg_specs):
"""Gives specs of all packages that provide virtual packages
with the supplied specs."""
@ -114,26 +135,25 @@ def providers_for(self, *vpkg_specs):
# Add all the providers that satisfy the vpkg spec.
if vspec.name in self.providers:
for provider_spec, spec_set in self.providers[vspec.name].items():
if provider_spec.satisfies(vspec, deps=False):
for p_spec, spec_set in self.providers[vspec.name].items():
if p_spec.satisfies(vspec, deps=False):
providers.update(spec_set)
# Return providers in order
return sorted(providers)
# TODO: this is pretty darned nasty, and inefficient, but there
# are not that many vdeps in most specs.
def _cross_provider_maps(self, lmap, rmap):
result = {}
for lspec, rspec in itertools.product(lmap, rmap):
for lspec, rspec in iproduct(lmap, rmap):
try:
constrained = lspec.constrained(rspec)
except spack.spec.UnsatisfiableSpecError:
continue
# lp and rp are left and right provider specs.
for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]):
for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]):
if lp_spec.name == rp_spec.name:
try:
const = lp_spec.constrained(rp_spec, deps=False)
@ -142,12 +162,10 @@ def _cross_provider_maps(self, lmap, rmap):
continue
return result
def __contains__(self, name):
"""Whether a particular vpkg name is in the index."""
return name in self.providers
def satisfies(self, other):
"""Check that providers of virtual specs are compatible."""
common = set(self.providers) & set(other.providers)
@ -164,3 +182,111 @@ def satisfies(self, other):
result[name] = crossed
return all(c in result for c in common)
def to_yaml(self, stream=None):
provider_list = self._transform(
lambda vpkg, pset: [
vpkg.to_node_dict(), [p.to_node_dict() for p in pset]], list)
yaml.dump({'provider_index': {'providers': provider_list}},
stream=stream)
@staticmethod
def from_yaml(stream):
try:
yfile = yaml.load(stream)
except MarkedYAMLError, e:
raise spack.spec.SpackYAMLError(
"error parsing YAML ProviderIndex cache:", str(e))
if not isinstance(yfile, dict):
raise spack.spec.SpackYAMLError(
"YAML ProviderIndex was not a dict.")
if 'provider_index' not in yfile:
raise spack.spec.SpackYAMLError(
"YAML ProviderIndex does not start with 'provider_index'")
index = ProviderIndex()
providers = yfile['provider_index']['providers']
index.providers = _transform(
providers,
lambda vpkg, plist: (
spack.spec.Spec.from_node_dict(vpkg),
set(spack.spec.Spec.from_node_dict(p) for p in plist)))
return index
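
A hypothetical round-trip through this YAML serialization, assuming an already-built ProviderIndex named index:

    from StringIO import StringIO  # Python 2, as used elsewhere in this codebase

    s = StringIO()
    index.to_yaml(s)
    s.seek(0)
    assert ProviderIndex.from_yaml(s) == index  # __eq__ compares provider maps
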
def merge(self, other):
"""Merge `other` ProviderIndex into this one."""
other = other.copy() # defensive copy.
for pkg in other.providers:
if pkg not in self.providers:
self.providers[pkg] = other.providers[pkg]
continue
spdict, opdict = self.providers[pkg], other.providers[pkg]
for provided_spec in opdict:
if provided_spec not in spdict:
spdict[provided_spec] = opdict[provided_spec]
continue
spdict[provided_spec] += opdict[provided_spec]
def remove_provider(self, pkg_name):
"""Remove a provider from the ProviderIndex."""
empty_pkg_dict = []
for pkg, pkg_dict in self.providers.items():
empty_pset = []
for provided, pset in pkg_dict.items():
same_name = set(p for p in pset if p.fullname == pkg_name)
pset.difference_update(same_name)
if not pset:
empty_pset.append(provided)
for provided in empty_pset:
del pkg_dict[provided]
if not pkg_dict:
empty_pkg_dict.append(pkg)
for pkg in empty_pkg_dict:
del self.providers[pkg]
def copy(self):
"""Deep copy of this ProviderIndex."""
clone = ProviderIndex()
clone.providers = self._transform(
lambda vpkg, pset: (vpkg, set((p.copy() for p in pset))))
return clone
def __eq__(self, other):
return self.providers == other.providers
def _transform(self, transform_fun, out_mapping_type=dict):
return _transform(self.providers, transform_fun, out_mapping_type)
def __str__(self):
return pformat(
_transform(self.providers,
lambda k, v: (k, list(v))))
def _transform(providers, transform_fun, out_mapping_type=dict):
"""Syntactic sugar for transforming a providers dict.
transform_fun takes a (vpkg, pset) mapping and runs it on each
pair in nested dicts.
"""
def mapiter(mappings):
if isinstance(mappings, dict):
return mappings.iteritems()
else:
return iter(mappings)
return dict(
(name, out_mapping_type([
transform_fun(vpkg, pset) for vpkg, pset in mapiter(mappings)]))
for name, mappings in providers.items())
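
For orientation, the nested providers mapping that _transform walks has the shape sketched below (hypothetical entries; the real index keys are Spec objects, abbreviated here as strings):

    providers = {
        'mpi': {                                  # virtual package name
            'mpi@:1.1': set(['mpich@1.9']),       # provided vspec -> providers
            'mpi@:2.0': set(['mpich2@1.7', 'mpich2@1.9']),
        },
    }
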

View file

@ -23,6 +23,9 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import stat
import shutil
import errno
import exceptions
import sys
import inspect
@ -30,15 +33,18 @@
import re
import traceback
from bisect import bisect_left
from types import ModuleType
import yaml
import llnl.util.tty as tty
from llnl.util.filesystem import *
import spack
import spack.error
import spack.config
import spack.spec
from spack.virtual import ProviderIndex
from spack.provider_index import ProviderIndex
from spack.util.naming import *
#
@ -51,6 +57,7 @@
# These names describe how repos should be laid out in the filesystem.
#
repo_config_name = 'repo.yaml' # Top-level filename for repo config.
repo_index_name = 'index.yaml' # Top-level filename for repository index.
packages_dir_name = 'packages' # Top-level repo directory containing pkgs.
package_file_name = 'package.py' # Filename for packages in a repository.
@ -68,12 +75,21 @@ def converter(self, spec_like, *args, **kwargs):
return converter
def _make_namespace_module(ns):
module = imp.new_module(ns)
module.__file__ = "(spack namespace)"
module.__path__ = []
module.__package__ = ns
return module
class SpackNamespace(ModuleType):
""" Allow lazy loading of modules."""
def __init__(self, namespace):
super(SpackNamespace, self).__init__(namespace)
self.__file__ = "(spack namespace)"
self.__path__ = []
self.__name__ = namespace
self.__package__ = namespace
self.__modules = {}
def __getattr__(self, name):
"""Getattr lazily loads modules if they're not already loaded."""
submodule = self.__package__ + '.' + name
setattr(self, name, __import__(submodule))
return getattr(self, name)
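
A sketch of the lazy loading this enables (hypothetical repo and package names; compare the import spack.pkg.builtin.mpich example later in this diff). The first attribute access triggers __getattr__, which imports the submodule and caches it with setattr:

    import spack.pkg.builtin         # a SpackNamespace module
    mpich = spack.pkg.builtin.mpich  # imported lazily on first access
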
def substitute_spack_prefix(path):
@ -104,7 +120,7 @@ def __init__(self, *repo_dirs, **kwargs):
self.by_namespace = NamespaceTrie()
self.by_path = {}
self._all_package_names = []
self._all_package_names = None
self._provider_index = None
# If repo_dirs is empty, just use the configuration
@ -125,7 +141,6 @@ def __init__(self, *repo_dirs, **kwargs):
"To remove the bad repository, run this command:",
" spack repo rm %s" % root)
def swap(self, other):
"""Convenience function to make swapping repostiories easier.
@ -143,7 +158,6 @@ def swap(self, other):
setattr(self, attr, getattr(other, attr))
setattr(other, attr, tmp)
def _add(self, repo):
"""Add a repository to the namespace and path indexes.
@ -157,36 +171,28 @@ def _add(self, repo):
if repo.namespace in self.by_namespace:
raise DuplicateRepoError(
"Package repos '%s' and '%s' both provide namespace %s"
% (repo.root, self.by_namespace[repo.namespace].root, repo.namespace))
% (repo.root, self.by_namespace[repo.namespace].root,
repo.namespace))
# Add repo to the pkg indexes
self.by_namespace[repo.full_namespace] = repo
self.by_path[repo.root] = repo
# add names to the cached name list
new_pkgs = set(repo.all_package_names())
new_pkgs.update(set(self._all_package_names))
self._all_package_names = sorted(new_pkgs, key=lambda n:n.lower())
def put_first(self, repo):
"""Add repo first in the search path."""
self._add(repo)
self.repos.insert(0, repo)
def put_last(self, repo):
"""Add repo last in the search path."""
self._add(repo)
self.repos.append(repo)
def remove(self, repo):
"""Remove a repo from the search path."""
if repo in self.repos:
self.repos.remove(repo)
def get_repo(self, namespace, default=NOT_PROVIDED):
"""Get a repository by namespace.
Arguments
@ -206,38 +212,45 @@ def get_repo(self, namespace, default=NOT_PROVIDED):
return default
return self.by_namespace[fullspace]
def first_repo(self):
"""Get the first repo in precedence order."""
return self.repos[0] if self.repos else None
def all_package_names(self):
"""Return all unique package names in all repositories."""
if self._all_package_names is None:
all_pkgs = set()
for repo in self.repos:
for name in repo.all_package_names():
all_pkgs.add(name)
self._all_package_names = sorted(all_pkgs, key=lambda n: n.lower())
return self._all_package_names
def all_packages(self):
for name in self.all_package_names():
yield self.get(name)
@property
def provider_index(self):
"""Merged ProviderIndex from all Repos in the RepoPath."""
if self._provider_index is None:
self._provider_index = ProviderIndex()
for repo in reversed(self.repos):
self._provider_index.merge(repo.provider_index)
return self._provider_index
@_autospec
def providers_for(self, vpkg_spec):
if self._provider_index is None:
self._provider_index = ProviderIndex(self.all_package_names())
providers = self._provider_index.providers_for(vpkg_spec)
providers = self.provider_index.providers_for(vpkg_spec)
if not providers:
raise UnknownPackageError(vpkg_spec.name)
return providers
@_autospec
def extensions_for(self, extendee_spec):
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def find_module(self, fullname, path=None):
"""Implements precedence for overlaid namespaces.
@ -264,7 +277,6 @@ def find_module(self, fullname, path=None):
return None
def load_module(self, fullname):
"""Handles loading container namespaces when necessary.
@ -273,18 +285,14 @@ def load_module(self, fullname):
if fullname in sys.modules:
return sys.modules[fullname]
# partition fullname into prefix and module name.
namespace, dot, module_name = fullname.rpartition('.')
if not self.by_namespace.is_prefix(fullname):
raise ImportError("No such Spack repo: %s" % fullname)
module = _make_namespace_module(namespace)
module = SpackNamespace(fullname)
module.__loader__ = self
sys.modules[fullname] = module
return module
@_autospec
def repo_for_pkg(self, spec):
"""Given a spec, get the repository for its package."""
@ -306,7 +314,6 @@ def repo_for_pkg(self, spec):
# that can operate on packages that don't exist yet.
return self.first_repo()
@_autospec
def get(self, spec, new=False):
"""Find a repo that contains the supplied spec's package.
@ -315,12 +322,10 @@ def get(self, spec, new=False):
"""
return self.repo_for_pkg(spec).get(spec)
def get_pkg_class(self, pkg_name):
"""Find a class for the spec's package and return the class object."""
return self.repo_for_pkg(pkg_name).get_pkg_class(pkg_name)
@_autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
@ -330,24 +335,19 @@ def dump_provenance(self, spec, path):
"""
return self.repo_for_pkg(spec).dump_provenance(spec, path)
def dirname_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
def filename_for_package_name(self, pkg_name):
return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
def exists(self, pkg_name):
return any(repo.exists(pkg_name) for repo in self.repos)
def __contains__(self, pkg_name):
return self.exists(pkg_name)
class Repo(object):
"""Class representing a package repository in the filesystem.
@ -381,12 +381,14 @@ def __init__(self, root, namespace=repo_namespace):
# check and raise BadRepoError on fail.
def check(condition, msg):
if not condition: raise BadRepoError(msg)
if not condition:
raise BadRepoError(msg)
# Validate repository layout.
self.config_file = join_path(self.root, repo_config_name)
self.config_file = join_path(self.root, repo_config_name)
check(os.path.isfile(self.config_file),
"No %s found in '%s'" % (repo_config_name, root))
self.packages_path = join_path(self.root, packages_dir_name)
check(os.path.isdir(self.packages_path),
"No directory '%s' found in '%s'" % (repo_config_name, root))
@ -398,12 +400,14 @@ def check(condition, msg):
self.namespace = config['namespace']
check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
("Invalid namespace '%s' in repo '%s'. " % (self.namespace, self.root)) +
("Invalid namespace '%s' in repo '%s'. "
% (self.namespace, self.root)) +
"Namespaces must be valid python identifiers separated by '.'")
# Set up 'full_namespace' to include the super-namespace
if self.super_namespace:
self.full_namespace = "%s.%s" % (self.super_namespace, self.namespace)
self.full_namespace = "%s.%s" % (
self.super_namespace, self.namespace)
else:
self.full_namespace = self.namespace
@ -414,12 +418,21 @@ def check(condition, msg):
self._modules = {}
self._classes = {}
self._instances = {}
# list of packages that are newer than the index.
self._needs_update = []
# Index of virtual dependencies
self._provider_index = None
# Cached list of package names.
self._all_package_names = None
# make sure the namespace for packages in this repo exists.
self._create_namespace()
# Unique filename for cache of virtual dependency providers
self._cache_file = 'providers/%s-index.yaml' % self.namespace
def _create_namespace(self):
"""Create this repo's namespace module and insert it into sys.modules.
@ -429,10 +442,11 @@ def _create_namespace(self):
"""
parent = None
for l in range(1, len(self._names)+1):
for l in range(1, len(self._names) + 1):
ns = '.'.join(self._names[:l])
if not ns in sys.modules:
module = _make_namespace_module(ns)
if ns not in sys.modules:
module = SpackNamespace(ns)
module.__loader__ = self
sys.modules[ns] = module
@ -442,14 +456,14 @@ def _create_namespace(self):
# This ensures that we can do things like:
# import spack.pkg.builtin.mpich as mpich
if parent:
modname = self._names[l-1]
if not hasattr(parent, modname):
setattr(parent, modname, module)
modname = self._names[l - 1]
setattr(parent, modname, module)
else:
# no need to set up a module, but keep track of the parent.
# no need to set up a module
module = sys.modules[ns]
parent = module
# but keep track of the parent in this loop
parent = module
def real_name(self, import_name):
"""Allow users to import Spack packages using Python identifiers.
@ -476,13 +490,11 @@ def real_name(self, import_name):
return name
return None
def is_prefix(self, fullname):
"""True if fullname is a prefix of this Repo's namespace."""
parts = fullname.split('.')
return self._names[:len(parts)] == parts
def find_module(self, fullname, path=None):
"""Python find_module import hook.
@ -498,7 +510,6 @@ def find_module(self, fullname, path=None):
return None
def load_module(self, fullname):
"""Python importer load hook.
@ -510,7 +521,7 @@ def load_module(self, fullname):
namespace, dot, module_name = fullname.rpartition('.')
if self.is_prefix(fullname):
module = _make_namespace_module(fullname)
module = SpackNamespace(fullname)
elif namespace == self.full_namespace:
real_name = self.real_name(module_name)
@ -523,8 +534,12 @@ def load_module(self, fullname):
module.__loader__ = self
sys.modules[fullname] = module
return module
if namespace != fullname:
parent = sys.modules[namespace]
if not hasattr(parent, module_name):
setattr(parent, module_name, module)
return module
def _read_config(self):
"""Check for a YAML config file in this db's root directory."""
@ -533,40 +548,39 @@ def _read_config(self):
yaml_data = yaml.load(reponame_file)
if (not yaml_data or 'repo' not in yaml_data or
not isinstance(yaml_data['repo'], dict)):
tty.die("Invalid %s in repository %s"
% (repo_config_name, self.root))
not isinstance(yaml_data['repo'], dict)):
tty.die("Invalid %s in repository %s" % (
repo_config_name, self.root))
return yaml_data['repo']
except exceptions.IOError, e:
except exceptions.IOError:
tty.die("Error reading %s when opening %s"
% (self.config_file, self.root))
@_autospec
def get(self, spec, new=False):
if spec.virtual:
raise UnknownPackageError(spec.name)
if spec.namespace and spec.namespace != self.namespace:
raise UnknownPackageError("Repository %s does not contain package %s"
% (self.namespace, spec.fullname))
raise UnknownPackageError(
"Repository %s does not contain package %s"
% (self.namespace, spec.fullname))
key = hash(spec)
if new or key not in self._instances:
package_class = self.get_pkg_class(spec.name)
try:
copy = spec.copy() # defensive copy. Package owns its spec.
copy = spec.copy() # defensive copy. Package owns its spec.
self._instances[key] = package_class(copy)
except Exception, e:
except Exception:
if spack.debug:
sys.excepthook(*sys.exc_info())
raise FailedConstructorError(spec.fullname, *sys.exc_info())
return self._instances[key]
@_autospec
def dump_provenance(self, spec, path):
"""Dump provenance information for a spec to a particular path.
@ -579,8 +593,9 @@ def dump_provenance(self, spec, path):
raise UnknownPackageError(spec.name)
if spec.namespace and spec.namespace != self.namespace:
raise UnknownPackageError("Repository %s does not contain package %s."
% (self.namespace, spec.fullname))
raise UnknownPackageError(
"Repository %s does not contain package %s."
% (self.namespace, spec.fullname))
# Install any patch files needed by packages.
mkdirp(path)
@ -595,34 +610,61 @@ def dump_provenance(self, spec, path):
# Install the package.py file itself.
install(self.filename_for_package_name(spec), path)
def purge(self):
"""Clear entire package instance cache."""
self._instances.clear()
def _update_provider_index(self):
# Check modification dates of all packages
self._fast_package_check()
def read():
with open(self.index_file) as f:
self._provider_index = ProviderIndex.from_yaml(f)
# Read the old ProviderIndex, or make a new one.
key = self._cache_file
index_existed = spack.user_cache.init_entry(key)
if index_existed and not self._needs_update:
with spack.user_cache.read_transaction(key) as f:
self._provider_index = ProviderIndex.from_yaml(f)
else:
with spack.user_cache.write_transaction(key) as (old, new):
if old:
self._provider_index = ProviderIndex.from_yaml(old)
else:
self._provider_index = ProviderIndex()
for pkg_name in self._needs_update:
namespaced_name = '%s.%s' % (self.namespace, pkg_name)
self._provider_index.remove_provider(namespaced_name)
self._provider_index.update(namespaced_name)
self._provider_index.to_yaml(new)
@property
def provider_index(self):
"""A provider index with names *specific* to this repo."""
if self._provider_index is None:
self._update_provider_index()
return self._provider_index
@_autospec
def providers_for(self, vpkg_spec):
if self._provider_index is None:
self._provider_index = ProviderIndex(self.all_package_names())
providers = self._provider_index.providers_for(vpkg_spec)
providers = self.provider_index.providers_for(vpkg_spec)
if not providers:
raise UnknownPackageError(vpkg_spec.name)
return providers
@_autospec
def extensions_for(self, extendee_spec):
return [p for p in self.all_packages() if p.extends(extendee_spec)]
def _check_namespace(self, spec):
"""Check that the spec's namespace is the same as this repository's."""
if spec.namespace and spec.namespace != self.namespace:
raise UnknownNamespaceError(spec.namespace)
@_autospec
def dirname_for_package_name(self, spec):
"""Get the directory name for a particular package. This is the
@ -630,7 +672,6 @@ def dirname_for_package_name(self, spec):
self._check_namespace(spec)
return join_path(self.packages_path, spec.name)
@_autospec
def filename_for_package_name(self, spec):
"""Get the filename for the module we should load for a particular
@ -645,48 +686,95 @@ def filename_for_package_name(self, spec):
pkg_dir = self.dirname_for_package_name(spec.name)
return join_path(pkg_dir, package_file_name)
def _fast_package_check(self):
"""List packages in the repo and check whether index is up to date.
def all_package_names(self):
"""Returns a sorted list of all package names in the Repo."""
Both of these operations require checking all `package.py`
files so we do them at the same time. We list the repo
directory and look at package.py files, and we compare the
index modification date with the most recently modified package
file, storing the result.
The implementation here should try to minimize filesystem
calls. At the moment, it is O(number of packages) and makes
about one stat call per package. This is reasonably fast, and
avoids actually importing packages in Spack, which is slow.
"""
if self._all_package_names is None:
self._all_package_names = []
# Get index modification time.
index_mtime = spack.user_cache.mtime(self._cache_file)
for pkg_name in os.listdir(self.packages_path):
# Skip non-directories in the package root.
pkg_dir = join_path(self.packages_path, pkg_name)
if not os.path.isdir(pkg_dir):
continue
# Skip directories without a package.py in them.
pkg_file = join_path(self.packages_path, pkg_name, package_file_name)
if not os.path.isfile(pkg_file):
continue
# Warn about invalid names that look like packages.
if not valid_module_name(pkg_name):
tty.warn("Skipping package at %s. '%s' is not a valid Spack module name."
% (pkg_dir, pkg_name))
msg = ("Skipping package at %s. "
"'%s' is not a valid Spack module name.")
tty.warn(msg % (pkg_dir, pkg_name))
continue
# construct the file name from the directory
pkg_file = join_path(
self.packages_path, pkg_name, package_file_name)
# Use stat here to avoid lots of calls to the filesystem.
try:
sinfo = os.stat(pkg_file)
except OSError as e:
if e.errno == errno.ENOENT:
# No package.py file here.
continue
elif e.errno == errno.EACCES:
tty.warn("Can't read package file %s." % pkg_file)
continue
raise e
# if it's not a file, skip it.
if stat.S_ISDIR(sinfo.st_mode):
continue
# All checks passed. Add it to the list.
self._all_package_names.append(pkg_name)
# record the package if it is newer than the index.
if sinfo.st_mtime > index_mtime:
self._needs_update.append(pkg_name)
self._all_package_names.sort()
return self._all_package_names
def all_package_names(self):
"""Returns a sorted list of all package names in the Repo."""
self._fast_package_check()
return self._all_package_names
def all_packages(self):
"""Iterator over all packages in the repository.
Use this with care, because loading packages is slow.
"""
for name in self.all_package_names():
yield self.get(name)
def exists(self, pkg_name):
"""Whether a package with the supplied name exists."""
# This does a binary search in the sorted list.
idx = bisect_left(self.all_package_names(), pkg_name)
return (idx < len(self._all_package_names) and
self._all_package_names[idx] == pkg_name)
if self._all_package_names:
# This does a binary search in the sorted list.
idx = bisect_left(self.all_package_names(), pkg_name)
return (idx < len(self._all_package_names) and
self._all_package_names[idx] == pkg_name)
# If we haven't generated the full package list, don't.
# Just check whether the file exists.
filename = self.filename_for_package_name(pkg_name)
return os.path.exists(filename)
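
The membership test above is the standard bisect_left idiom on a sorted list; a standalone illustration:

    from bisect import bisect_left

    names = ['hdf5', 'mpich', 'zlib']   # sorted, as all_package_names() returns

    def exists(name):
        idx = bisect_left(names, name)  # insertion point for name
        return idx < len(names) and names[idx] == name

    assert exists('mpich')
    assert not exists('openmpi')
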
def _get_pkg_module(self, pkg_name):
"""Create a module for a particular package.
@ -719,7 +807,6 @@ def _get_pkg_module(self, pkg_name):
return self._modules[pkg_name]
def get_pkg_class(self, pkg_name):
"""Get the class for the package out of its module.
@ -727,6 +814,11 @@ def get_pkg_class(self, pkg_name):
package. Then extracts the package class from the module
according to Spack's naming convention.
"""
namespace, _, pkg_name = pkg_name.rpartition('.')
if namespace and (namespace != self.namespace):
raise InvalidNamespaceError('Invalid namespace for %s repo: %s'
% (self.namespace, namespace))
class_name = mod_to_class(pkg_name)
module = self._get_pkg_module(pkg_name)
@ -736,15 +828,12 @@ def get_pkg_class(self, pkg_name):
return cls
def __str__(self):
return "[Repo '%s' at '%s']" % (self.namespace, self.root)
def __repr__(self):
return self.__str__()
def __contains__(self, pkg_name):
return self.exists(pkg_name)
@ -753,30 +842,37 @@ def create_repo(root, namespace=None):
"""Create a new repository in root with the specified namespace.
If the namespace is not provided, use basename of root.
Return the canonicalized path and the namespace of the created repository.
Return the canonicalized path and namespace of the created repository.
"""
root = canonicalize_path(root)
if not namespace:
namespace = os.path.basename(root)
if not re.match(r'\w[\.\w-]*', namespace):
raise InvalidNamespaceError("'%s' is not a valid namespace." % namespace)
raise InvalidNamespaceError(
"'%s' is not a valid namespace." % namespace)
existed = False
if os.path.exists(root):
if os.path.isfile(root):
raise BadRepoError('File %s already exists and is not a directory' % root)
raise BadRepoError('File %s already exists and is not a directory'
% root)
elif os.path.isdir(root):
if not os.access(root, os.R_OK | os.W_OK):
raise BadRepoError('Cannot create new repo in %s: cannot access directory.' % root)
raise BadRepoError(
'Cannot create new repo in %s: cannot access directory.'
% root)
if os.listdir(root):
raise BadRepoError('Cannot create new repo in %s: directory is not empty.' % root)
raise BadRepoError(
'Cannot create new repo in %s: directory is not empty.'
% root)
existed = True
full_path = os.path.realpath(root)
parent = os.path.dirname(full_path)
if not os.access(parent, os.R_OK | os.W_OK):
raise BadRepoError("Cannot create repository in %s: can't access parent!" % root)
raise BadRepoError(
"Cannot create repository in %s: can't access parent!" % root)
try:
config_path = os.path.join(root, repo_config_name)

View file

@ -102,23 +102,26 @@
from StringIO import StringIO
from operator import attrgetter
import yaml
from yaml.error import MarkedYAMLError
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
from llnl.util.lang import *
from llnl.util.tty.color import *
import spack
import spack.architecture
import spack.compilers as compilers
import spack.error
import spack.parse
import yaml
from llnl.util.filesystem import join_path
from llnl.util.lang import *
from llnl.util.tty.color import *
from spack.build_environment import get_path_from_module, load_module
from spack.util.naming import mod_to_class
from spack.util.prefix import Prefix
from spack.util.string import *
from spack.version import *
from spack.virtual import ProviderIndex
from yaml.error import MarkedYAMLError
from spack.provider_index import ProviderIndex
# Valid pattern for an identifier in Spack
identifier_re = r'\w[\w-]*'
@ -438,8 +441,7 @@ def copy(self):
return clone
def _cmp_key(self):
return ''.join(str(key) + ' '.join(str(v) for v in value)
for key, value in sorted(self.items()))
return tuple((k, tuple(v)) for k, v in sorted(self.iteritems()))
def __str__(self):
sorted_keys = filter(
@ -715,7 +717,7 @@ def package_class(self):
"""Internal package call gets only the class object for a package.
Use this to just get package metadata.
"""
return spack.repo.get_pkg_class(self.name)
return spack.repo.get_pkg_class(self.fullname)
@property
def virtual(self):
@ -904,37 +906,36 @@ def dag_hash(self, length=None):
return b32_hash
def to_node_dict(self):
d = {}
params = dict((name, v.value) for name, v in self.variants.items())
params.update(dict((name, value)
for name, value in self.compiler_flags.items()))
deps = self.dependencies_dict(deptype=('link', 'run'))
d = {
'parameters': params,
'arch': self.architecture,
'dependencies': dict(
if params:
d['parameters'] = params
if self.dependencies():
deps = self.dependencies_dict(deptype=('link', 'run'))
d['dependencies'] = dict(
(name, {
'hash': dspec.spec.dag_hash(),
'type': [str(s) for s in dspec.deptypes]})
for name, dspec in deps.items())
}
# Older concrete specs do not have a namespace. Omit for
# consistent hashing.
if not self.concrete or self.namespace:
if self.namespace:
d['namespace'] = self.namespace
if self.architecture:
# TODO: Fix the target.to_dict to account for the tuple
# Want it to be a dict of dicts
d['arch'] = self.architecture.to_dict()
else:
d['arch'] = None
if self.compiler:
d.update(self.compiler.to_dict())
else:
d['compiler'] = None
d.update(self.versions.to_dict())
if self.versions:
d.update(self.versions.to_dict())
return {self.name: d}
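
With these changes, empty fields are omitted from the node dict so that old and new specs hash consistently. A hypothetical example of the resulting structure (all values invented):

    node = {'mpich': {
        'version': '3.0.4',
        'namespace': 'builtin',          # only present when namespace is set
        'parameters': {'debug': False},  # only when variants/compiler flags exist
        'dependencies': {                # only when there are link/run deps
            'hwloc': {'hash': 'abcd1234', 'type': ['link']},
        },
        # 'arch' and 'compiler' entries appear only when set on the spec
    }}
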
@ -954,17 +955,18 @@ def from_node_dict(node):
spec = Spec(name)
spec.namespace = node.get('namespace', None)
spec.versions = VersionList.from_dict(node)
spec._hash = node.get('hash', None)
if 'hash' in node:
spec._hash = node['hash']
if 'version' in node or 'versions' in node:
spec.versions = VersionList.from_dict(node)
spec.architecture = spack.architecture.arch_from_dict(node['arch'])
if 'arch' in node:
spec.architecture = spack.architecture.arch_from_dict(node['arch'])
if node['compiler'] is None:
spec.compiler = None
else:
if 'compiler' in node:
spec.compiler = CompilerSpec.from_dict(node)
else:
spec.compiler = None
if 'parameters' in node:
for name, value in node['parameters'].items():
@ -972,14 +974,12 @@ def from_node_dict(node):
spec.compiler_flags[name] = value
else:
spec.variants[name] = VariantSpec(name, value)
elif 'variants' in node:
for name, value in node['variants'].items():
spec.variants[name] = VariantSpec(name, value)
for name in FlagMap.valid_compiler_flags():
spec.compiler_flags[name] = []
else:
raise SpackRecordError(
"Did not find a valid format for variants in YAML file")
# Don't read dependencies here; from_node_dict() is used by
# from_yaml() to read the root *and* each dependency spec.
@ -1037,6 +1037,10 @@ def from_yaml(stream):
for node in nodes:
# get dependency dict from the node.
name = next(iter(node))
if 'dependencies' not in node[name]:
continue
yaml_deps = node[name]['dependencies']
for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
# Fill in dependencies by looking them up by name in deps dict
@ -1567,7 +1571,7 @@ def validate_names(self):
UnsupportedCompilerError.
"""
for spec in self.traverse():
# Don't get a package for a virtual name.
# raise an UnknownPackageError if the spec's package isn't real.
if (not spec.virtual) and spec.name:
spack.repo.get(spec.fullname)
@ -2824,12 +2828,6 @@ def __init__(self, msg, yaml_error):
super(SpackYAMLError, self).__init__(msg, str(yaml_error))
class SpackRecordError(spack.error.SpackError):
def __init__(self, msg):
super(SpackRecordError, self).__init__(msg)
class AmbiguousHashError(SpecError):
def __init__(self, msg, *specs):

View file

@ -315,7 +315,8 @@ def fetch(self, mirror_only=False):
# Add URL strategies for all the mirrors with the digest
for url in urls:
fetchers.insert(0, fs.URLFetchStrategy(url, digest))
fetchers.insert(0, spack.cache.fetcher(self.mirror_path, digest))
fetchers.insert(0, spack.fetch_cache.fetcher(self.mirror_path,
digest))
# Look for the archive in list_url
package_name = os.path.dirname(self.mirror_path)
@ -365,7 +366,7 @@ def check(self):
self.fetcher.check()
def cache_local(self):
spack.cache.store(self.fetcher, self.mirror_path)
spack.fetch_cache.store(self.fetcher, self.mirror_path)
def expand_archive(self):
"""Changes to the stage directory and attempt to expand the downloaded

View file

@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
import os
import llnl.util.tty as tty
import nose
@ -32,16 +33,53 @@
from spack.test.tally_plugin import Tally
"""Names of tests to be included in Spack's test suite"""
# All the tests Spack knows about.
# Keep these one per line so that it's easy to see changes in diffs.
test_names = [
'architecture', 'versions', 'url_parse', 'url_substitution', 'packages',
'stage', 'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize',
'multimethod', 'install', 'package_sanity', 'config', 'directory_layout',
'pattern', 'python_version', 'git_fetch', 'svn_fetch', 'hg_fetch',
'mirror', 'modules', 'url_extrapolate', 'cc', 'link_tree', 'spec_yaml',
'optional_deps', 'make_executable', 'build_system_guess', 'lock',
'database', 'namespace_trie', 'yaml', 'sbang', 'environment',
'concretize_preferences', 'cmd.find', 'cmd.uninstall', 'cmd.test_install',
'cmd.test_compiler_cmd', 'cmd.module'
'architecture',
'build_system_guess',
'cc',
'cmd.find',
'cmd.module',
'cmd.test_install',
'cmd.uninstall',
'concretize',
'concretize_preferences',
'config',
'database',
'directory_layout',
'environment',
'file_cache',
'git_fetch',
'hg_fetch',
'install',
'link_tree',
'lock',
'make_executable',
'mirror',
'modules',
'multimethod',
'namespace_trie',
'optional_deps',
'package_sanity',
'packages',
'pattern',
'python_version',
'sbang',
'spec_dag',
'spec_semantics',
'spec_syntax',
'spec_yaml',
'stage',
'svn_fetch',
'url_extrapolate',
'url_parse',
'url_substitution',
'versions',
'provider_index',
'yaml',
# This test needs to be last until global compiler cache is fixed.
'cmd.test_compiler_cmd',
]
@ -53,6 +91,10 @@ def list_tests():
def run(names, outputDir, verbose=False):
"""Run tests with the supplied names. Names should be a list. If
it's empty, run ALL of Spack's tests."""
# Print output to stdout if verbose is 1.
if verbose:
os.environ['NOSE_NOCAPTURE'] = '1'
if not names:
names = test_names
else:

View file

@ -86,6 +86,29 @@ def test_platform(self):
self.assertEqual(str(output_platform_class), str(my_platform_class))
def test_boolness(self):
# Make sure architecture reports that it's False when nothing's set.
arch = spack.architecture.Arch()
self.assertFalse(arch)
# Dummy architecture parts
plat = spack.architecture.platform()
plat_os = plat.operating_system('default_os')
plat_target = plat.target('default_target')
# Make sure architecture reports that it's True when anything is set.
arch = spack.architecture.Arch()
arch.platform = plat
self.assertTrue(arch)
arch = spack.architecture.Arch()
arch.platform_os = plat_os
self.assertTrue(arch)
arch = spack.architecture.Arch()
arch.target = plat_target
self.assertTrue(arch)
def test_user_front_end_input(self):
"""Test when user inputs just frontend that both the frontend target
and frontend operating system match

View file

@ -29,6 +29,7 @@
from spack.concretize import find_spec
from spack.test.mock_packages_test import *
class ConcretizeTest(MockPackagesTest):
def check_spec(self, abstract, concrete):
@ -59,7 +60,6 @@ def check_spec(self, abstract, concrete):
if abstract.architecture and abstract.architecture.concrete:
self.assertEqual(abstract.architecture, concrete.architecture)
def check_concretize(self, abstract_spec):
abstract = Spec(abstract_spec)
concrete = abstract.concretized()
@ -70,29 +70,24 @@ def check_concretize(self, abstract_spec):
return concrete
def test_concretize_no_deps(self):
self.check_concretize('libelf')
self.check_concretize('libelf@0.8.13')
def test_concretize_dag(self):
self.check_concretize('callpath')
self.check_concretize('mpileaks')
self.check_concretize('libelf')
def test_concretize_variant(self):
self.check_concretize('mpich+debug')
self.check_concretize('mpich~debug')
self.check_concretize('mpich debug=2')
self.check_concretize('mpich')
def test_concretize_compiler_flags(self):
self.check_concretize('mpich cppflags="-O3"')
def test_concretize_preferred_version(self):
spec = self.check_concretize('python')
self.assertEqual(spec.versions, ver('2.7.11'))
@ -100,7 +95,6 @@ def test_concretize_preferred_version(self):
spec = self.check_concretize('python@3.5.1')
self.assertEqual(spec.versions, ver('3.5.1'))
def test_concretize_with_virtual(self):
self.check_concretize('mpileaks ^mpi')
self.check_concretize('mpileaks ^mpi@:1.1')
@ -111,7 +105,6 @@ def test_concretize_with_virtual(self):
self.check_concretize('mpileaks ^mpi@:1')
self.check_concretize('mpileaks ^mpi@1.2:2')
def test_concretize_with_restricted_virtual(self):
self.check_concretize('mpileaks ^mpich2')
@ -142,58 +135,55 @@ def test_concretize_with_restricted_virtual(self):
concrete = self.check_concretize('mpileaks ^mpich2@1.3.1:1.4')
self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4'))
def test_concretize_with_provides_when(self):
"""Make sure insufficient versions of MPI are not in providers list when
we ask for some advanced version.
"""
self.assertTrue(not any(spec.satisfies('mpich2@:1.0')
for spec in spack.repo.providers_for('mpi@2.1')))
self.assertTrue(
not any(spec.satisfies('mpich2@:1.0')
for spec in spack.repo.providers_for('mpi@2.1')))
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
for spec in spack.repo.providers_for('mpi@2.2')))
self.assertTrue(
not any(spec.satisfies('mpich2@:1.1')
for spec in spack.repo.providers_for('mpi@2.2')))
self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
for spec in spack.repo.providers_for('mpi@2.2')))
self.assertTrue(
not any(spec.satisfies('mpich@:1')
for spec in spack.repo.providers_for('mpi@2')))
self.assertTrue(not any(spec.satisfies('mpich@:1')
for spec in spack.repo.providers_for('mpi@2')))
self.assertTrue(not any(spec.satisfies('mpich@:1')
for spec in spack.repo.providers_for('mpi@3')))
self.assertTrue(not any(spec.satisfies('mpich2')
for spec in spack.repo.providers_for('mpi@3')))
self.assertTrue(
not any(spec.satisfies('mpich@:1')
for spec in spack.repo.providers_for('mpi@3')))
self.assertTrue(
not any(spec.satisfies('mpich2')
for spec in spack.repo.providers_for('mpi@3')))
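These assertions follow from the version-constrained provides() rules in the mock packages (spelled out in the provider_index test docstring added later in this commit): mpich2 releases before 1.1 only provide mpi@:2.0, so nothing satisfying mpich2@:1.0 can appear among providers of mpi@2.1. A hedged restatement of the first assertion:
# Sketch: any provider of mpi@2.1 must either be a new-enough mpich2
# or a different package altogether; mpich2@:1.0 can never qualify.
providers = spack.repo.providers_for('mpi@2.1')
assert all(not s.satisfies('mpich2@:1.0') for s in providers)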
def test_concretize_two_virtuals(self):
"""Test a package with multiple virtual dependencies."""
s = Spec('hypre').concretize()
Spec('hypre').concretize()
def test_concretize_two_virtuals_with_one_bound(self):
"""Test a package with multiple virtual dependencies and one preset."""
s = Spec('hypre ^openblas').concretize()
Spec('hypre ^openblas').concretize()
def test_concretize_two_virtuals_with_two_bound(self):
"""Test a package with multiple virtual dependencies and two of them preset."""
s = Spec('hypre ^openblas ^netlib-lapack').concretize()
"""Test a package with multiple virtual deps and two of them preset."""
Spec('hypre ^openblas ^netlib-lapack').concretize()
def test_concretize_two_virtuals_with_dual_provider(self):
"""Test a package with multiple virtual dependencies and force a provider
that provides both."""
s = Spec('hypre ^openblas-with-lapack').concretize()
Spec('hypre ^openblas-with-lapack').concretize()
def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
"""Test a package with multiple virtual dependencies and force a provider
that provides both, and another conflicting package that provides one."""
"""Test a package with multiple virtual dependencies and force a
provider that provides both, and another conflicting package that
provides one.
"""
s = Spec('hypre ^openblas-with-lapack ^netlib-lapack')
self.assertRaises(spack.spec.MultipleProviderError, s.concretize)
def test_virtual_is_fully_expanded_for_callpath(self):
# force dependence on fake "zmpi" by asking for MPI 10.0
spec = Spec('callpath ^mpi@10.0')
@ -210,7 +200,6 @@ def test_virtual_is_fully_expanded_for_callpath(self):
self.assertTrue('fake' in spec._dependencies['zmpi'].spec)
def test_virtual_is_fully_expanded_for_mpileaks(self):
spec = Spec('mpileaks ^mpi@10.0')
self.assertTrue('mpi' in spec._dependencies)
@ -220,23 +209,24 @@ def test_virtual_is_fully_expanded_for_mpileaks(self):
self.assertTrue('zmpi' in spec._dependencies)
self.assertTrue('callpath' in spec._dependencies)
self.assertTrue('zmpi' in spec._dependencies['callpath'].
spec._dependencies)
self.assertTrue('fake' in spec._dependencies['callpath'].
spec._dependencies['zmpi'].
spec._dependencies)
self.assertTrue(
'zmpi' in spec._dependencies['callpath']
.spec._dependencies)
self.assertTrue(
'fake' in spec._dependencies['callpath']
.spec._dependencies['zmpi']
.spec._dependencies)
self.assertTrue(all(not 'mpi' in d._dependencies for d in spec.traverse()))
self.assertTrue(
all('mpi' not in d._dependencies for d in spec.traverse()))
self.assertTrue('zmpi' in spec)
self.assertTrue('mpi' in spec)
def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
spec = Spec('indirect_mpich')
spec.normalize()
spec.concretize()
def test_compiler_inheritance(self):
spec = Spec('mpileaks')
spec.normalize()
@ -248,26 +238,26 @@ def test_compiler_inheritance(self):
self.assertTrue(spec['libdwarf'].compiler.satisfies('clang'))
self.assertTrue(spec['libelf'].compiler.satisfies('clang'))
def test_external_package(self):
spec = Spec('externaltool%gcc')
spec.concretize()
self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
self.assertEqual(
spec['externaltool'].external, '/path/to/external_tool')
self.assertFalse('externalprereq' in spec)
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
def test_external_package_module(self):
# No tcl modules on darwin/linux machines
# TODO: improved way to check for this.
if (spack.architecture.platform().name == 'darwin' or
spack.architecture.platform().name == 'linux'):
platform = spack.architecture.platform().name
if (platform == 'darwin' or platform == 'linux'):
return
spec = Spec('externalmodule')
spec.concretize()
self.assertEqual(spec['externalmodule'].external_module, 'external-module')
self.assertEqual(
spec['externalmodule'].external_module, 'external-module')
self.assertFalse('externalprereq' in spec)
self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc'))
@ -280,16 +270,16 @@ def test_nobuild_package(self):
got_error = True
self.assertTrue(got_error)
def test_external_and_virtual(self):
spec = Spec('externaltest')
spec.concretize()
self.assertEqual(spec['externaltool'].external, '/path/to/external_tool')
self.assertEqual(spec['stuff'].external, '/path/to/external_virtual_gcc')
self.assertEqual(
spec['externaltool'].external, '/path/to/external_tool')
self.assertEqual(
spec['stuff'].external, '/path/to/external_virtual_gcc')
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
self.assertTrue(spec['stuff'].compiler.satisfies('gcc'))
def test_find_spec_parents(self):
"""Tests the spec finding logic used by concretization. """
s = Spec('a +foo',
@ -300,7 +290,6 @@ def test_find_spec_parents(self):
self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name)
def test_find_spec_children(self):
s = Spec('a',
Spec('b +foo',
@ -315,7 +304,6 @@ def test_find_spec_children(self):
Spec('e +foo'))
self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name)
def test_find_spec_sibling(self):
s = Spec('a',
Spec('b +foo',
@ -333,7 +321,6 @@ def test_find_spec_sibling(self):
Spec('f +foo')))
self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name)
def test_find_spec_self(self):
s = Spec('a',
Spec('b +foo',
@ -342,7 +329,6 @@ def test_find_spec_self(self):
Spec('e'))
self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name)
def test_find_spec_none(self):
s = Spec('a',
Spec('b',
@ -351,7 +337,6 @@ def test_find_spec_none(self):
Spec('e'))
self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s))
def test_compiler_child(self):
s = Spec('mpileaks%clang ^dyninst%gcc')
s.concretize()

View file

@ -31,7 +31,6 @@
import spack
from llnl.util.filesystem import join_path
from llnl.util.lock import *
from llnl.util.tty.colify import colify
from spack.test.mock_database import MockDatabase
@ -88,26 +87,28 @@ def test_010_all_install_sanity(self):
# query specs with multiple configurations
mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
self.assertEqual(len(mpileaks_specs), 3)
self.assertEqual(len(callpath_specs), 3)
self.assertEqual(len(mpi_specs), 3)
# query specs with single configurations
dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
self.assertEqual(len(dyninst_specs), 1)
self.assertEqual(len(libdwarf_specs), 1)
self.assertEqual(len(libelf_specs), 1)
# Query by dependency
self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)
self.assertEqual(
len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
self.assertEqual(
len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
self.assertEqual(
len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)
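The expected counts come from the mock database layout: one mpileaks root per MPI provider, each with its own callpath, plus single copies of the remaining leaves. Assuming the standard mock setup, the arithmetic behind these query counts (and the total of 13 asserted in test_050 below) is:
# 3 mpileaks + 3 callpath + 3 MPI providers (mpich, mpich2, zmpi)
# + dyninst + libdwarf + libelf + fake  =  13 records in total
assert 3 + 3 + 3 + 4 == 13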
def test_015_write_and_read(self):
# write and read DB
@ -122,7 +123,6 @@ def test_015_write_and_read(self):
self.assertEqual(new_rec.path, rec.path)
self.assertEqual(new_rec.installed, rec.installed)
def _check_db_sanity(self):
"""Utiilty function to check db against install layout."""
expected = sorted(spack.install_layout.all_specs())
@ -132,12 +132,10 @@ def _check_db_sanity(self):
for e, a in zip(expected, actual):
self.assertEqual(e, a)
def test_020_db_sanity(self):
"""Make sure query() returns what's actually in the db."""
self._check_db_sanity()
def test_030_db_sanity_from_another_process(self):
def read_and_modify():
self._check_db_sanity() # check that other process can read DB
@ -152,30 +150,28 @@ def read_and_modify():
with self.installed_db.read_transaction():
self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)
def test_040_ref_counts(self):
"""Ensure that we got ref counts right when we read the DB."""
self.installed_db._check_ref_counts()
def test_050_basic_query(self):
"""Ensure that querying the database is consistent with what is installed."""
"""Ensure querying database is consistent with what is installed."""
# query everything
self.assertEqual(len(spack.installed_db.query()), 13)
# query specs with multiple configurations
mpileaks_specs = self.installed_db.query('mpileaks')
callpath_specs = self.installed_db.query('callpath')
mpi_specs = self.installed_db.query('mpi')
self.assertEqual(len(mpileaks_specs), 3)
self.assertEqual(len(callpath_specs), 3)
self.assertEqual(len(mpi_specs), 3)
# query specs with single configurations
dyninst_specs = self.installed_db.query('dyninst')
libdwarf_specs = self.installed_db.query('libdwarf')
libelf_specs = self.installed_db.query('libelf')
self.assertEqual(len(dyninst_specs), 1)
self.assertEqual(len(libdwarf_specs), 1)
@ -186,7 +182,6 @@ def test_050_basic_query(self):
self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1)
def _check_remove_and_add_package(self, spec):
"""Remove a spec from the DB, then add it and make sure everything's
still ok once it is added. This checks that it was
@ -215,15 +210,12 @@ def _check_remove_and_add_package(self, spec):
self._check_db_sanity()
self.installed_db._check_ref_counts()
def test_060_remove_and_add_root_package(self):
self._check_remove_and_add_package('mpileaks ^mpich')
def test_070_remove_and_add_dependency_package(self):
self._check_remove_and_add_package('dyninst')
def test_080_root_ref_counts(self):
rec = self.installed_db.get_record('mpileaks ^mpich')
@ -231,45 +223,89 @@ def test_080_root_ref_counts(self):
self.installed_db.remove('mpileaks ^mpich')
# record no longer in DB
self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
self.assertEqual(
self.installed_db.query('mpileaks ^mpich', installed=any), [])
# record's deps have updated ref_counts
self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0)
self.assertEqual(
self.installed_db.get_record('callpath ^mpich').ref_count, 0)
self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1)
# put the spec back
# Put the spec back
self.installed_db.add(rec.spec, rec.path)
# record is present again
self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)
self.assertEqual(
len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)
# dependencies have ref counts updated
self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1)
self.assertEqual(
self.installed_db.get_record('callpath ^mpich').ref_count, 1)
self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)
def test_090_non_root_ref_counts(self):
mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich')
callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich')
self.installed_db.get_record('mpileaks ^mpich')
self.installed_db.get_record('callpath ^mpich')
# "force remove" a non-root spec from the DB
self.installed_db.remove('callpath ^mpich')
# record still in DB but marked uninstalled
self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), [])
self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1)
self.assertEqual(
self.installed_db.query('callpath ^mpich', installed=True), [])
self.assertEqual(
len(self.installed_db.query('callpath ^mpich', installed=any)), 1)
# record and its deps have same ref_counts
self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1)
self.assertEqual(self.installed_db.get_record(
'callpath ^mpich', installed=any).ref_count, 1)
self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)
# remove only dependent of uninstalled callpath record
self.installed_db.remove('mpileaks ^mpich')
# record and parent are completely gone.
self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), [])
self.assertEqual(
self.installed_db.query('mpileaks ^mpich', installed=any), [])
self.assertEqual(
self.installed_db.query('callpath ^mpich', installed=any), [])
# mpich ref count updated properly.
mpich_rec = self.installed_db.get_record('mpich')
self.assertEqual(mpich_rec.ref_count, 0)
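The ref-count bookkeeping above is easiest to follow as a trace; assuming mpich starts with two dependents (callpath ^mpich and mpileaks ^mpich), the expected transitions are:
# after remove('callpath ^mpich'):  record kept but marked uninstalled
#   callpath ^mpich -> ref_count 1   (mpileaks still depends on it)
#   mpich           -> ref_count 2   (both dependents still present)
# after remove('mpileaks ^mpich'):  last dependent gone
#   mpileaks/callpath records reaped entirely
#   mpich           -> ref_count 0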
def test_100_no_write_with_exception_on_remove(self):
def fail_while_writing():
with self.installed_db.write_transaction():
self._mock_remove('mpileaks ^zmpi')
raise Exception()
with self.installed_db.read_transaction():
self.assertEqual(
len(self.installed_db.query('mpileaks ^zmpi', installed=any)),
1)
self.assertRaises(Exception, fail_while_writing)
# reload DB and make sure zmpi is still there.
with self.installed_db.read_transaction():
self.assertEqual(
len(self.installed_db.query('mpileaks ^zmpi', installed=any)),
1)
def test_110_no_write_with_exception_on_install(self):
def fail_while_writing():
with self.installed_db.write_transaction():
self._mock_install('cmake')
raise Exception()
with self.installed_db.read_transaction():
self.assertEqual(
self.installed_db.query('cmake', installed=any), [])
self.assertRaises(Exception, fail_while_writing)
# reload DB and make sure cmake was not written.
with self.installed_db.read_transaction():
self.assertEqual(
self.installed_db.query('cmake', installed=any), [])
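Both tests pin down the transactional guarantee behind the new locking layer: if an exception escapes a write_transaction, the in-memory mutation must never reach the database file. The pattern under test, in miniature (argument names hypothetical; db.add's signature matches the rec.spec/rec.path call used above):
def fail_while_writing(db, spec, path):
    with db.write_transaction():
        db.add(spec, path)  # mutate the in-memory database
        raise Exception()   # escape before the transaction completes
# a subsequent read_transaction must show no trace of the new record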

View file

@ -0,0 +1,83 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Test Spack's FileCache.
"""
import os
import shutil
import tempfile
import unittest
from spack.file_cache import FileCache
class FileCacheTest(unittest.TestCase):
"""Ensure that a file cache can properly write to a file and recover its
contents."""
def setUp(self):
self.scratch_dir = tempfile.mkdtemp()
self.cache = FileCache(self.scratch_dir)
def tearDown(self):
shutil.rmtree(self.scratch_dir)
def test_write_and_read_cache_file(self):
"""Test writing then reading a cached file."""
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is None)
self.assertTrue(new is not None)
new.write("foobar\n")
with self.cache.read_transaction('test.yaml') as stream:
text = stream.read()
self.assertEqual("foobar\n", text)
def test_remove(self):
"""Test removing an entry from the cache."""
self.test_write_and_write_cache_file()
self.cache.remove('test.yaml')
self.assertFalse(os.path.exists(self.cache.cache_path('test.yaml')))
self.assertFalse(os.path.exists(self.cache._lock_path('test.yaml')))
def test_write_and_write_cache_file(self):
"""Test two write transactions on a cached file."""
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is None)
self.assertTrue(new is not None)
new.write("foobar\n")
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is not None)
text = old.read()
self.assertEqual("foobar\n", text)
self.assertTrue(new is not None)
new.write("barbaz\n")
with self.cache.read_transaction('test.yaml') as stream:
text = stream.read()
self.assertEqual("barbaz\n", text)

View file

@ -46,21 +46,21 @@ def setUp(self):
self.lock_path = join_path(self.tempdir, 'lockfile')
touch(self.lock_path)
def tearDown(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
def multiproc_test(self, *functions):
"""Order some processes using simple barrier synchronization."""
b = Barrier(len(functions), timeout=barrier_timeout)
procs = [Process(target=f, args=(b,)) for f in functions]
for p in procs: p.start()
for p in procs:
p.start()
for p in procs:
p.join()
self.assertEqual(p.exitcode, 0)
#
# Process snippets below can be composed into tests.
#
@ -68,27 +68,26 @@ def acquire_write(self, barrier):
lock = Lock(self.lock_path)
lock.acquire_write() # grab exclusive lock
barrier.wait()
barrier.wait() # hold the lock until exception raises in other procs.
def acquire_read(self, barrier):
lock = Lock(self.lock_path)
lock.acquire_read() # grab shared lock
barrier.wait()
barrier.wait() # hold the lock until exception raises in other procs.
def timeout_write(self, barrier):
lock = Lock(self.lock_path)
barrier.wait() # wait for lock acquire in first process
self.assertRaises(LockError, lock.acquire_write, 0.1)
barrier.wait()
def timeout_read(self, barrier):
lock = Lock(self.lock_path)
barrier.wait() # wait for lock acquire in first process
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait()
#
# Test that exclusive locks on other processes time out when an
# exclusive lock is held.
@ -97,11 +96,13 @@ def test_write_lock_timeout_on_write(self):
self.multiproc_test(self.acquire_write, self.timeout_write)
def test_write_lock_timeout_on_write_2(self):
self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_write, self.timeout_write, self.timeout_write)
def test_write_lock_timeout_on_write_3(self):
self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_write, self.timeout_write, self.timeout_write,
self.timeout_write)
#
# Test that shared locks on other processes time out when an
@ -111,11 +112,13 @@ def test_read_lock_timeout_on_write(self):
self.multiproc_test(self.acquire_write, self.timeout_read)
def test_read_lock_timeout_on_write_2(self):
self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read)
self.multiproc_test(
self.acquire_write, self.timeout_read, self.timeout_read)
def test_read_lock_timeout_on_write_3(self):
self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read)
self.multiproc_test(
self.acquire_write, self.timeout_read, self.timeout_read,
self.timeout_read)
#
# Test that exclusive locks time out when shared locks are held.
@ -124,27 +127,35 @@ def test_write_lock_timeout_on_read(self):
self.multiproc_test(self.acquire_read, self.timeout_write)
def test_write_lock_timeout_on_read_2(self):
self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.timeout_write, self.timeout_write)
def test_write_lock_timeout_on_read_3(self):
self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.timeout_write, self.timeout_write,
self.timeout_write)
#
# Test that exclusive locks time out while lots of shared locks are held.
#
def test_write_lock_timeout_with_multiple_readers_2_1(self):
self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.acquire_read, self.timeout_write)
def test_write_lock_timeout_with_multiple_readers_2_2(self):
self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.acquire_read, self.timeout_write,
self.timeout_write)
def test_write_lock_timeout_with_multiple_readers_3_1(self):
self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.acquire_read, self.acquire_read,
self.timeout_write)
def test_write_lock_timeout_with_multiple_readers_3_2(self):
self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)
self.multiproc_test(
self.acquire_read, self.acquire_read, self.acquire_read,
self.timeout_write, self.timeout_write)
#
# Longer test case that ensures locks are reusable. Ordering is
@ -155,110 +166,281 @@ def p1(barrier):
lock = Lock(self.lock_path)
lock.acquire_write()
barrier.wait() # ---------------------------------------- 1
# others test timeout
barrier.wait() # ---------------------------------------- 2
lock.release_write() # release and others acquire read
barrier.wait() # ---------------------------------------- 3
self.assertRaises(LockError, lock.acquire_write, 0.1)
lock.acquire_read()
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 7
# p2 releases write and read
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(barrier):
lock = Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
# others release reads
barrier.wait() # ---------------------------------------- 5
lock.acquire_write() # upgrade read to write
barrier.wait() # ---------------------------------------- 6
# others test timeout
barrier.wait() # ---------------------------------------- 7
lock.release_write() # release read AND write (need both)
lock.release_read()
barrier.wait() # ---------------------------------------- 8
# p3 acquires read
barrier.wait() # ---------------------------------------- 9
# p3 upgrades read to write
barrier.wait() # ---------------------------------------- 10
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 11
# p3 releases locks
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(barrier):
lock = Lock(self.lock_path)
# p1 acquires write
barrier.wait() # ---------------------------------------- 1
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 2
lock.acquire_read()
barrier.wait() # ---------------------------------------- 3
# p1 tests shared read
barrier.wait() # ---------------------------------------- 4
lock.release_read()
barrier.wait() # ---------------------------------------- 5
# p2 upgrades read to write
barrier.wait() # ---------------------------------------- 6
self.assertRaises(LockError, lock.acquire_write, 0.1)
self.assertRaises(LockError, lock.acquire_read, 0.1)
barrier.wait() # ---------------------------------------- 7
# p2 releases write & read
barrier.wait() # ---------------------------------------- 8
lock.acquire_read()
barrier.wait() # ---------------------------------------- 9
lock.acquire_write()
barrier.wait() # ---------------------------------------- 10
# others test timeout
barrier.wait() # ---------------------------------------- 11
lock.release_read() # release read AND write in opposite
lock.release_write() # order from before on p2
barrier.wait() # ---------------------------------------- 12
lock.acquire_read()
barrier.wait() # ---------------------------------------- 13
lock.release_read()
self.multiproc_test(p1, p2, p3)
def test_transaction(self):
def enter_fn():
vals['entered'] = True
def exit_fn(t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
lock = Lock(self.lock_path)
vals = {'entered': False, 'exited': False, 'exception': False}
with ReadTransaction(lock, enter_fn, exit_fn):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
vals = {'entered': False, 'exited': False, 'exception': False}
with WriteTransaction(lock, enter_fn, exit_fn):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
def test_transaction_with_exception(self):
def enter_fn():
vals['entered'] = True
def exit_fn(t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
lock = Lock(self.lock_path)
def do_read_with_exception():
with ReadTransaction(lock, enter_fn, exit_fn):
raise Exception()
def do_write_with_exception():
with WriteTransaction(lock, enter_fn, exit_fn):
raise Exception()
vals = {'entered': False, 'exited': False, 'exception': False}
self.assertRaises(Exception, do_read_with_exception)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
vals = {'entered': False, 'exited': False, 'exception': False}
self.assertRaises(Exception, do_write_with_exception)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
def test_transaction_with_context_manager(self):
class TestContextManager(object):
def __enter__(self):
vals['entered'] = True
def __exit__(self, t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
def exit_fn(t, v, tb):
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
lock = Lock(self.lock_path)
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
with ReadTransaction(lock, TestContextManager, exit_fn):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
with ReadTransaction(lock, TestContextManager):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
with WriteTransaction(lock, TestContextManager, exit_fn):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
with WriteTransaction(lock, TestContextManager):
pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
def test_transaction_with_context_manager_and_exception(self):
class TestContextManager(object):
def __enter__(self):
vals['entered'] = True
def __exit__(self, t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
def exit_fn(t, v, tb):
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
lock = Lock(self.lock_path)
def do_read_with_exception(exit_fn):
with ReadTransaction(lock, TestContextManager, exit_fn):
raise Exception()
def do_write_with_exception(exit_fn):
with WriteTransaction(lock, TestContextManager, exit_fn):
raise Exception()
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
self.assertRaises(Exception, do_read_with_exception, exit_fn)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertTrue(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
self.assertRaises(Exception, do_read_with_exception, None)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
self.assertRaises(Exception, do_write_with_exception, exit_fn)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertTrue(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False}
self.assertRaises(Exception, do_write_with_exception, None)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])

View file

@ -95,8 +95,10 @@ def setUp(self):
self._mock_install('mpileaks ^zmpi')
def tearDown(self):
for spec in spack.installed_db.query():
spec.package.do_uninstall(spec)
with spack.installed_db.write_transaction():
for spec in spack.installed_db.query():
spec.package.do_uninstall(spec)
super(MockDatabase, self).tearDown()
shutil.rmtree(self.install_path)
spack.install_path = self.spack_install_path

View file

@ -0,0 +1,93 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Tests for provider index cache files.
Tests assume that mock packages provide this:
{'blas': {
blas: set([netlib-blas, openblas, openblas-with-lapack])},
'lapack': {lapack: set([netlib-lapack, openblas-with-lapack])},
'mpi': {mpi@:1: set([mpich@:1]),
mpi@:2.0: set([mpich2]),
mpi@:2.1: set([mpich2@1.1:]),
mpi@:2.2: set([mpich2@1.2:]),
mpi@:3: set([mpich@3:]),
mpi@:10.0: set([zmpi])},
'stuff': {stuff: set([externalvirtual])}}
"""
from StringIO import StringIO
import spack
from spack.spec import Spec
from spack.provider_index import ProviderIndex
from spack.test.mock_packages_test import *
class ProviderIndexTest(MockPackagesTest):
def test_yaml_round_trip(self):
p = ProviderIndex(spack.repo.all_package_names())
ostream = StringIO()
p.to_yaml(ostream)
istream = StringIO(ostream.getvalue())
q = ProviderIndex.from_yaml(istream)
self.assertEqual(p, q)
def test_providers_for_simple(self):
p = ProviderIndex(spack.repo.all_package_names())
blas_providers = p.providers_for('blas')
self.assertTrue(Spec('netlib-blas') in blas_providers)
self.assertTrue(Spec('openblas') in blas_providers)
self.assertTrue(Spec('openblas-with-lapack') in blas_providers)
lapack_providers = p.providers_for('lapack')
self.assertTrue(Spec('netlib-lapack') in lapack_providers)
self.assertTrue(Spec('openblas-with-lapack') in lapack_providers)
def test_mpi_providers(self):
p = ProviderIndex(spack.repo.all_package_names())
mpi_2_providers = p.providers_for('mpi@2')
self.assertTrue(Spec('mpich2') in mpi_2_providers)
self.assertTrue(Spec('mpich@3:') in mpi_2_providers)
mpi_3_providers = p.providers_for('mpi@3')
self.assertTrue(Spec('mpich2') not in mpi_3_providers)
self.assertTrue(Spec('mpich@3:') in mpi_3_providers)
self.assertTrue(Spec('zmpi') in mpi_3_providers)
def test_equal(self):
p = ProviderIndex(spack.repo.all_package_names())
q = ProviderIndex(spack.repo.all_package_names())
self.assertEqual(p, q)
def test_copy(self):
p = ProviderIndex(spack.repo.all_package_names())
q = p.copy()
self.assertEqual(p, q)
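Read against the mapping in the module docstring, these tests make the matching rule concrete: providers_for(vspec) returns a package only when the version range it provides can intersect the requested one. A short sketch, assuming the mock packages above are loaded:
p = ProviderIndex(spack.repo.all_package_names())
# mpich2 provides mpi@:2.0 (newer mpich2 up to mpi@:2.2), so it can satisfy mpi@2 ...
assert Spec('mpich2') in p.providers_for('mpi@2')
# ... but not mpi@3, which only mpich@3: and zmpi reach.
assert Spec('mpich2') not in p.providers_for('mpi@3')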

View file

@ -30,41 +30,36 @@
from spack.spec import Spec
from spack.test.mock_packages_test import *
class SpecDagTest(MockPackagesTest):
class SpecYamlTest(MockPackagesTest):
def check_yaml_round_trip(self, spec):
yaml_text = spec.to_yaml()
spec_from_yaml = Spec.from_yaml(yaml_text)
self.assertTrue(spec.eq_dag(spec_from_yaml))
def test_simple_spec(self):
spec = Spec('mpileaks')
self.check_yaml_round_trip(spec)
def test_normal_spec(self):
spec = Spec('mpileaks+debug~opt')
spec.normalize()
self.check_yaml_round_trip(spec)
def test_ambiguous_version_spec(self):
spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
spec.normalize()
self.check_yaml_round_trip(spec)
def test_concrete_spec(self):
spec = Spec('mpileaks+debug~opt')
spec.concretize()
self.check_yaml_round_trip(spec)
def test_yaml_subdag(self):
spec = Spec('mpileaks^mpich+debug')
spec.concretize()
yaml_spec = Spec.from_yaml(spec.to_yaml())
for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):