Move provider cache to home directory and refactor Transactions

Major stuff:

- Created a FileCache for managing user cache files in Spack.  Currently just
  handles virtuals (a usage sketch follows this summary).

- Moved virtual cache from the repository to the home directory so that users do
  not need write access to Spack repositories to use them.

- Refactored `Transaction` class in `database.py` -- moved it to
  `LockTransaction` in `lock.py` and made it reusable by other classes.

Other additions:

- Added tests for file cache and transactions.

- Added a few more tests for database

- Fixed bug in DB where writes could happen even if exceptions were raised
  during a transaction.

- `spack uninstall` now attempts to repair the database when it discovers that a
  prefix doesn't exist but a DB record does.
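
For orientation, a minimal usage sketch of the new user-level cache, assuming the
module-level `spack.user_cache` FileCache set up in the diff below; the cache key
is made up for illustration:

    import spack

    key = 'providers/example-index.yaml'   # hypothetical cache key

    # init_entry() creates parent directories and the lock file for the key,
    # and reports whether the cache file already exists.
    exists = spack.user_cache.init_entry(key)

    # A write transaction yields (old_file, new_file); the new contents go to a
    # temp file that replaces the original only if the block exits cleanly.
    with spack.user_cache.write_transaction(key) as (old, new):
        previous = old.read() if old else ''
        new.write(previous + 'providers: {}\n')

    # A read transaction takes a shared lock and yields the open cache file.
    with spack.user_cache.read_transaction(key) as f:
        print(f.read())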
Todd Gamblin 2016-08-09 00:24:54 -07:00
parent 2042e9a6d8
commit 102ac7bcf1
18 changed files with 600 additions and 113 deletions


@ -28,6 +28,9 @@
import time
import socket
__all__ = ['Lock', 'LockTransaction', 'WriteTransaction', 'ReadTransaction',
'LockError']
# Default timeout in seconds, after which locks will raise exceptions.
_default_timeout = 60
@ -63,7 +66,9 @@ def _lock(self, op, timeout):
fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
if op == fcntl.LOCK_EX:
os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
os.write(
self._fd,
"pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
return
except IOError as error:
@ -170,6 +175,66 @@ def release_write(self):
return False
class LockTransaction(object):
"""Simple nested transaction context manager that uses a file lock.
This class can trigger actions when the lock is acquired for the
first time and released for the last.
If the acquire_fn returns a value, it is used as the return value for
__enter__, allowing it to be passed as the `as` argument of a `with`
statement.
If acquire_fn returns a context manager, *its* `__enter__` function will be
called in `__enter__` after acquire_fn, and its `__exit__` function will be
called before `release_fn` in `__exit__`, allowing you to nest a context
manager to be used along with the lock.
Timeout for lock is customizable.
"""
def __init__(self, lock, acquire_fn=None, release_fn=None,
timeout=_default_timeout):
self._lock = lock
self._timeout = timeout
self._acquire_fn = acquire_fn
self._release_fn = release_fn
self._as = None
def __enter__(self):
if self._enter() and self._acquire_fn:
self._as = self._acquire_fn()
if hasattr(self._as, '__enter__'):
return self._as.__enter__()
else:
return self._as
def __exit__(self, type, value, traceback):
if self._exit():
if self._as and hasattr(self._as, '__exit__'):
self._as.__exit__(type, value, traceback)
if self._release_fn:
self._release_fn(type, value, traceback)
if value:
raise value
class ReadTransaction(LockTransaction):
def _enter(self):
return self._lock.acquire_read(self._timeout)
def _exit(self):
return self._lock.release_read()
class WriteTransaction(LockTransaction):
def _enter(self):
return self._lock.acquire_write(self._timeout)
def _exit(self):
return self._lock.release_write()
class LockError(Exception):
"""Raised when an attempt to acquire a lock times out."""
pass
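
For illustration, a small sketch of how the new transaction classes compose with a
Lock; the paths and callbacks below are invented, and this assumes the Lock
constructor takes just the lock-file path, which must already exist:

    import os
    import tempfile

    from llnl.util.lock import Lock, ReadTransaction, WriteTransaction

    scratch = tempfile.mkdtemp()
    data_path = os.path.join(scratch, 'index.data')
    lock_path = os.path.join(scratch, 'index.lock')
    open(lock_path, 'w').close()                 # lock file must exist up front
    with open(data_path, 'w') as f:
        f.write('hello\n')

    lock = Lock(lock_path)

    def acquire():
        # Runs once when the outermost transaction takes the lock; the return
        # value becomes the `as` target, and because a file object is itself a
        # context manager it is also entered here and closed on exit.
        return open(data_path)

    def release(type, value, traceback):
        # Runs when the lock is finally released; receives the exception info,
        # all None if the block completed without error.
        pass

    with ReadTransaction(lock, acquire, release) as f:
        print(f.read())

    with WriteTransaction(lock, acquire, release) as f:
        print(f.read())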


@ -50,8 +50,15 @@
share_path = join_path(spack_root, "share", "spack")
cache_path = join_path(var_path, "cache")
# User configuration location
user_config_path = os.path.expanduser('~/.spack')
import spack.fetch_strategy
cache = spack.fetch_strategy.FsCache(cache_path)
fetch_cache = spack.fetch_strategy.FsCache(cache_path)
from spack.file_cache import FileCache
user_cache_path = join_path(user_config_path, 'cache')
user_cache = FileCache(user_cache_path)
prefix = spack_root
opt_path = join_path(prefix, "opt")


@ -33,7 +33,11 @@ def setup_parser(subparser):
'-s', '--stage', action='store_true', default=True,
help="Remove all temporary build stages (default).")
subparser.add_argument(
'-c', '--cache', action='store_true', help="Remove cached downloads.")
'-d', '--downloads', action='store_true',
help="Remove cached downloads.")
subparser.add_argument(
'-u', '--user-cache', action='store_true',
help="Remove caches in user home directory. Includes virtual indices.")
subparser.add_argument(
'-a', '--all', action='store_true',
help="Remove all of the above.")
@ -49,4 +53,6 @@ def purge(parser, args):
if args.stage or args.all:
stage.purge()
if args.cache or args.all:
spack.cache.destroy()
spack.fetch_cache.destroy()
if args.user_cache or args.all:
spack.user_cache.destroy()


@ -41,10 +41,10 @@ def setup_parser(subparser):
subparser.add_argument(
'-l', '--list', action='store_true', dest='list', help="Show available tests")
subparser.add_argument(
'--createXmlOutput', action='store_true', dest='createXmlOutput',
'--createXmlOutput', action='store_true', dest='createXmlOutput',
help="Create JUnit XML from test results")
subparser.add_argument(
'--xmlOutputDir', dest='xmlOutputDir',
'--xmlOutputDir', dest='xmlOutputDir',
help="Nose creates XML files in this directory")
subparser.add_argument(
'-v', '--verbose', action='store_true', dest='verbose',
@ -62,7 +62,7 @@ def fetcher(self, targetPath, digest):
class MockCacheFetcher(object):
def set_stage(self, stage):
pass
def fetch(self):
raise FetchError("Mock cache always fails for tests")
@ -82,8 +82,8 @@ def test(parser, args):
outputDir = join_path(os.getcwd(), "test-output")
else:
outputDir = os.path.abspath(args.xmlOutputDir)
if not os.path.exists(outputDir):
mkdirp(outputDir)
spack.cache = MockCache()
spack.fetch_cache = MockCache()
spack.test.run(args.names, outputDir, args.verbose)


@ -184,7 +184,8 @@ def uninstall(parser, args):
uninstall_list = list(set(uninstall_list))
if has_error:
tty.die('You can use spack uninstall --dependents to uninstall these dependencies as well') # NOQA: ignore=E501
tty.die('You can use spack uninstall --dependents '
'to uninstall these dependencies as well')
if not args.yes_to_all:
tty.msg("The following packages will be uninstalled : ")


@ -525,7 +525,7 @@ def clear(self):
ConfigScope('site', os.path.join(spack.etc_path, 'spack'))
"""User configuration can override both spack defaults and site config."""
ConfigScope('user', os.path.expanduser('~/.spack'))
ConfigScope('user', spack.user_config_path)
def highest_precedence_scope():


@ -165,11 +165,11 @@ def __init__(self, root, db_dir=None):
def write_transaction(self, timeout=_db_lock_timeout):
"""Get a write lock context manager for use in a `with` block."""
return WriteTransaction(self, self._read, self._write, timeout)
return WriteTransaction(self.lock, self._read, self._write, timeout)
def read_transaction(self, timeout=_db_lock_timeout):
"""Get a read lock context manager for use in a `with` block."""
return ReadTransaction(self, self._read, None, timeout)
return ReadTransaction(self.lock, self._read, timeout=timeout)
def _write_to_yaml(self, stream):
"""Write out the databsae to a YAML file.
@ -352,12 +352,22 @@ def _check_ref_counts(self):
"Invalid ref_count: %s: %d (expected %d), in DB %s" %
(key, found, expected, self._index_path))
def _write(self):
def _write(self, type, value, traceback):
"""Write the in-memory database index to its file path.
Does no locking.
This is a helper function called by the WriteTransaction context
manager. If there is an exception while the write lock is active,
nothing will be written to the database file, but the in-memory database
*may* be left in an inconsistent state. It will be consistent after the
start of the next transaction, when it is read from disk again.
This routine does no locking.
"""
# Do not write if exceptions were raised
if type is not None:
return
temp_file = self._index_path + (
'.%s.%s.temp' % (socket.getfqdn(), os.getpid()))
@ -589,49 +599,6 @@ def missing(self, spec):
return key in self._data and not self._data[key].installed
class _Transaction(object):
"""Simple nested transaction context manager that uses a file lock.
This class can trigger actions when the lock is acquired for the
first time and released for the last.
Timeout for lock is customizable.
"""
def __init__(self, db,
acquire_fn=None,
release_fn=None,
timeout=_db_lock_timeout):
self._db = db
self._timeout = timeout
self._acquire_fn = acquire_fn
self._release_fn = release_fn
def __enter__(self):
if self._enter() and self._acquire_fn:
self._acquire_fn()
def __exit__(self, type, value, traceback):
if self._exit() and self._release_fn:
self._release_fn()
class ReadTransaction(_Transaction):
def _enter(self):
return self._db.lock.acquire_read(self._timeout)
def _exit(self):
return self._db.lock.release_read()
class WriteTransaction(_Transaction):
def _enter(self):
return self._db.lock.acquire_write(self._timeout)
def _exit(self):
return self._db.lock.release_write()
class CorruptDatabaseError(SpackError):
def __init__(self, path, msg=''):
super(CorruptDatabaseError, self).__init__(


@ -0,0 +1,181 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import shutil
from llnl.util.filesystem import *
from llnl.util.lock import *
import spack
from spack.error import SpackError
class FileCache(object):
"""This class manages cached data in the filesystem.
- Cache files are fetched and stored by unique keys. Keys can be relative
paths, so that there can be some hierarchy in the cache.
- The FileCache handles locking cache files for reading and writing, so
client code need not manage locks for cache entries.
"""
def __init__(self, root):
"""Create a file cache object.
This will create the cache directory if it does not exist yet.
"""
self.root = root.rstrip(os.path.sep)
if not os.path.exists(self.root):
mkdirp(self.root)
self._locks = {}
def destroy(self):
"""Remove all files under the cache root."""
for f in os.listdir(self.root):
path = join_path(self.root, f)
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
os.remove(path)
def cache_path(self, key):
"""Path to the file in the cache for a particular key."""
return join_path(self.root, key)
def _lock_path(self, key):
"""Path to the file in the cache for a particular key."""
keyfile = os.path.basename(key)
keydir = os.path.dirname(key)
return join_path(self.root, keydir, '.' + keyfile + '.lock')
def _get_lock(self, key):
"""Create a lock for a key, if necessary, and return a lock object."""
if key not in self._locks:
lock_file = self._lock_path(key)
if not os.path.exists(lock_file):
touch(lock_file)
self._locks[key] = Lock(lock_file)
return self._locks[key]
def init_entry(self, key):
"""Ensure we can access a cache file. Create a lock for it if needed.
Return whether the cache file exists yet or not.
"""
cache_path = self.cache_path(key)
exists = os.path.exists(cache_path)
if exists:
if not os.path.isfile(cache_path):
raise CacheError("Cache file is not a file: %s" % cache_path)
if not os.access(cache_path, os.R_OK|os.W_OK):
raise CacheError("Cannot access cache file: %s" % cache_path)
else:
# if the key is hierarchical, make parent directories
parent = os.path.dirname(cache_path)
if parent.rstrip(os.path.sep) != self.root:
mkdirp(parent)
if not os.access(parent, os.R_OK|os.W_OK):
raise CacheError("Cannot access cache directory: %s" % parent)
# ensure lock is created for this key
self._get_lock(key)
return exists
def read_transaction(self, key):
"""Get a read transaction on a file cache item.
Returns a ReadTransaction context manager and opens the cache file for
reading. You can use it like this:
with spack.user_cache.read_transaction(key) as cache_file:
cache_file.read()
"""
return ReadTransaction(
self._get_lock(key), lambda: open(self.cache_path(key)))
def write_transaction(self, key):
"""Get a write transaction on a file cache item.
Returns a WriteTransaction context manager that opens a temporary file
for writing. Once the context manager finishes, if nothing went wrong,
moves the file into place on top of the old file atomically.
"""
class WriteContextManager(object):
def __enter__(cm):
cm.orig_filename = self.cache_path(key)
cm.orig_file = None
if os.path.exists(cm.orig_filename):
cm.orig_file = open(cm.orig_filename, 'r')
cm.tmp_filename = self.cache_path(key) + '.tmp'
cm.tmp_file = open(cm.tmp_filename, 'w')
return cm.orig_file, cm.tmp_file
def __exit__(cm, type, value, traceback):
if cm.orig_file:
cm.orig_file.close()
cm.tmp_file.close()
if value:
# remove tmp on exception & raise it
os.remove(cm.tmp_filename)
raise value
else:
os.rename(cm.tmp_filename, cm.orig_filename)
return WriteTransaction(self._get_lock(key), WriteContextManager)
def mtime(self, key):
"""Return modification time of cache file, or 0 if it does not exist.
Time is in units returned by os.stat in the mtime field, which is
platform-dependent.
"""
if not self.init_entry(key):
return 0
else:
sinfo = os.stat(self.cache_path(key))
return sinfo.st_mtime
def remove(self, key):
lock = self._get_lock(key)
try:
lock.acquire_write()
os.unlink(self.cache_path(key))
finally:
lock.release_write()
os.unlink(self._lock_path(key))
class CacheError(SpackError): pass
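
Beyond the read/write transactions shown above, a short sketch of the rest of an
entry's lifecycle (the scratch directory and key are illustrative; the new
FileCache tests further down exercise similar write/read/remove flows):

    import tempfile

    from spack.file_cache import FileCache

    cache = FileCache(tempfile.mkdtemp())
    key = 'providers/demo-index.yaml'     # hypothetical hierarchical key

    print(cache.init_entry(key))          # False: the entry does not exist yet
    print(cache.mtime(key))               # 0 for a missing entry

    with cache.write_transaction(key) as (old, new):
        new.write('demo: true\n')

    print(cache.mtime(key) > 0)           # True once the entry has been written
    cache.remove(key)                     # unlinks both the entry and its lock file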


@ -520,7 +520,8 @@ def header(self):
def prerequisite(self, spec):
tty.warn('prerequisites: not supported by dotkit module files')
tty.warn('\tYou may want to check ~/.spack/modules.yaml')
tty.warn('\tYou may want to check %s/modules.yaml'
% spack.user_config_path)
return ''


@ -1198,7 +1198,15 @@ def install(self, spec, prefix):
def do_uninstall(self, force=False):
if not self.installed:
raise InstallError(str(self.spec) + " is not installed.")
# prefix may not exist, but DB may be inconsistent. Try to fix by
# removing, but omit hooks.
specs = spack.installed_db.query(self.spec, installed=True)
if specs:
spack.installed_db.remove(specs[0])
tty.msg("Removed stale DB entry for %s" % self.spec.short_spec)
return
else:
raise InstallError(str(self.spec) + " is not installed.")
if not force:
dependents = self.installed_dependents


@ -41,6 +41,7 @@
from llnl.util.lock import Lock
from llnl.util.filesystem import *
import spack
import spack.error
import spack.config
import spack.spec
@ -414,17 +415,6 @@ def check(condition, msg):
check(os.path.isdir(self.packages_path),
"No directory '%s' found in '%s'" % (repo_config_name, root))
self.index_file = join_path(self.root, repo_index_name)
check(not os.path.exists(self.index_file) or
(os.path.isfile(self.index_file) and os.access(self.index_file, os.R_OK|os.W_OK)),
"Cannot access repository index file in %s" % root)
# lock file for reading/writing the index
self._lock_path = join_path(self.root, 'lock')
if not os.path.exists(self._lock_path):
touch(self._lock_path)
self._lock = Lock(self._lock_path)
# Read configuration and validate namespace
config = self._read_config()
check('namespace' in config, '%s must define a namespace.'
@ -461,6 +451,8 @@ def check(condition, msg):
# make sure the namespace for packages in this repo exists.
self._create_namespace()
# Unique filename for cache of virtual dependency providers
self._cache_file = 'providers/%s-index.yaml' % self.namespace
def _create_namespace(self):
"""Create this repo's namespace module and insert it into sys.modules.
@ -658,21 +650,15 @@ def read():
self._provider_index = ProviderIndex.from_yaml(f)
# Read the old ProviderIndex, or make a new one.
index_existed = os.path.isfile(self.index_file)
key = self._cache_file
index_existed = spack.user_cache.init_entry(key)
if index_existed and not self._needs_update:
self._lock.acquire_read()
try:
read()
finally:
self._lock.release_read()
with spack.user_cache.read_transaction(key) as f:
self._provider_index = ProviderIndex.from_yaml(f)
else:
tmp = self.index_file + '.tmp'
self._lock.acquire_write()
try:
if index_existed:
with open(self.index_file) as f:
self._provider_index = ProviderIndex.from_yaml(f)
with spack.user_cache.write_transaction(key) as (old, new):
if old:
self._provider_index = ProviderIndex.from_yaml(old)
else:
self._provider_index = ProviderIndex()
@ -681,17 +667,7 @@ def read():
self._provider_index.remove_provider(namespaced_name)
self._provider_index.update(namespaced_name)
with open(tmp, 'w') as f:
self._provider_index.to_yaml(f)
os.rename(tmp, self.index_file)
except:
shutil.rmtree(tmp, ignore_errors=True)
raise
finally:
self._lock.release_write()
self._provider_index.to_yaml(new)
@property
@ -745,7 +721,7 @@ def filename_for_package_name(self, spec):
def _fast_package_check(self):
"""List packages in the repo and cehck whether index is up to date.
"""List packages in the repo and check whether index is up to date.
Both of these operations require checking all `package.py`
files so we do them at the same time. We list the repo
@ -763,10 +739,7 @@ def _fast_package_check(self):
self._all_package_names = []
# Get index modification time.
index_mtime = 0
if os.path.exists(self.index_file):
sinfo = os.stat(self.index_file)
index_mtime = sinfo.st_mtime
index_mtime = spack.user_cache.mtime(self._cache_file)
for pkg_name in os.listdir(self.packages_path):
# Skip non-directories in the package root.
@ -774,8 +747,9 @@ def _fast_package_check(self):
# Warn about invalid names that look like packages.
if not valid_module_name(pkg_name):
tty.warn("Skipping package at %s. '%s' is not a valid Spack module name."
% (pkg_dir, pkg_name))
msg = ("Skipping package at %s. "
"'%s' is not a valid Spack module name.")
tty.warn(msg % (pkg_dir, pkg_name))
continue
# construct the file name from the directory


@ -315,7 +315,8 @@ def fetch(self, mirror_only=False):
# Add URL strategies for all the mirrors with the digest
for url in urls:
fetchers.insert(0, fs.URLFetchStrategy(url, digest))
fetchers.insert(0, spack.cache.fetcher(self.mirror_path, digest))
fetchers.insert(0, spack.fetch_cache.fetcher(self.mirror_path,
digest))
# Look for the archive in list_url
package_name = os.path.dirname(self.mirror_path)
@ -365,7 +366,7 @@ def check(self):
self.fetcher.check()
def cache_local(self):
spack.cache.store(self.fetcher, self.mirror_path)
spack.fetch_cache.store(self.fetcher, self.mirror_path)
def expand_archive(self):
"""Changes to the stage directory and attempt to expand the downloaded


@ -49,6 +49,7 @@
'database',
'directory_layout',
'environment',
'file_cache',
'git_fetch',
'hg_fetch',
'install',


@ -273,3 +273,37 @@ def test_090_non_root_ref_counts(self):
# mpich ref count updated properly.
mpich_rec = self.installed_db.get_record('mpich')
self.assertEqual(mpich_rec.ref_count, 0)
def test_100_no_write_with_exception_on_remove(self):
def fail_while_writing():
with self.installed_db.write_transaction():
self._mock_remove('mpileaks ^zmpi')
raise Exception()
with self.installed_db.read_transaction():
self.assertEqual(
len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1)
self.assertRaises(Exception, fail_while_writing)
# reload DB and make sure zmpi is still there.
with self.installed_db.read_transaction():
self.assertEqual(
len(self.installed_db.query('mpileaks ^zmpi', installed=any)), 1)
def test_110_no_write_with_exception_on_install(self):
def fail_while_writing():
with self.installed_db.write_transaction():
self._mock_install('cmake')
raise Exception()
with self.installed_db.read_transaction():
self.assertEqual(
self.installed_db.query('cmake', installed=any), [])
self.assertRaises(Exception, fail_while_writing)
# reload DB and make sure cmake was not written.
with self.installed_db.read_transaction():
self.assertEqual(
self.installed_db.query('cmake', installed=any), [])


@ -0,0 +1,84 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Test Spack's FileCache.
"""
import os
import shutil
import tempfile
import unittest
import spack
from spack.file_cache import FileCache
class FileCacheTest(unittest.TestCase):
"""Ensure that a file cache can properly write to a file and recover its
contents."""
def setUp(self):
self.scratch_dir = tempfile.mkdtemp()
self.cache = FileCache(self.scratch_dir)
def tearDown(self):
shutil.rmtree(self.scratch_dir)
def test_write_and_read_cache_file(self):
"""Test writing then reading a cached file."""
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is None)
self.assertTrue(new is not None)
new.write("foobar\n")
with self.cache.read_transaction('test.yaml') as stream:
text = stream.read()
self.assertEqual("foobar\n", text)
def test_remove(self):
"""Test removing an entry from the cache."""
self.test_write_and_write_cache_file()
self.cache.remove('test.yaml')
self.assertFalse(os.path.exists(self.cache.cache_path('test.yaml')))
self.assertFalse(os.path.exists(self.cache._lock_path('test.yaml')))
def test_write_and_write_cache_file(self):
"""Test two write transactions on a cached file."""
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is None)
self.assertTrue(new is not None)
new.write("foobar\n")
with self.cache.write_transaction('test.yaml') as (old, new):
self.assertTrue(old is not None)
text = old.read()
self.assertEqual("foobar\n", text)
self.assertTrue(new is not None)
new.write("barbaz\n")
with self.cache.read_transaction('test.yaml') as stream:
text = stream.read()
self.assertEqual("barbaz\n", text)


@ -187,7 +187,6 @@ def p1(barrier):
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p2(barrier):
lock = Lock(self.lock_path)
@ -224,7 +223,6 @@ def p2(barrier):
barrier.wait() # ---------------------------------------- 13
lock.release_read()
def p3(barrier):
lock = Lock(self.lock_path)
@ -262,3 +260,164 @@ def p3(barrier):
lock.release_read()
self.multiproc_test(p1, p2, p3)
def test_transaction(self):
def enter_fn():
vals['entered'] = True
def exit_fn(t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
lock = Lock(self.lock_path)
vals = {'entered': False, 'exited': False, 'exception': False }
with ReadTransaction(lock, enter_fn, exit_fn): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
vals = {'entered': False, 'exited': False, 'exception': False }
with WriteTransaction(lock, enter_fn, exit_fn): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
def test_transaction_with_exception(self):
def enter_fn():
vals['entered'] = True
def exit_fn(t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
lock = Lock(self.lock_path)
def do_read_with_exception():
with ReadTransaction(lock, enter_fn, exit_fn):
raise Exception()
def do_write_with_exception():
with WriteTransaction(lock, enter_fn, exit_fn):
raise Exception()
vals = {'entered': False, 'exited': False, 'exception': False }
self.assertRaises(Exception, do_read_with_exception)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
vals = {'entered': False, 'exited': False, 'exception': False }
self.assertRaises(Exception, do_write_with_exception)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
def test_transaction_with_context_manager(self):
class TestContextManager(object):
def __enter__(self):
vals['entered'] = True
def __exit__(self, t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
def exit_fn(t, v, tb):
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
lock = Lock(self.lock_path)
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
with ReadTransaction(lock, TestContextManager, exit_fn): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
with ReadTransaction(lock, TestContextManager): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
with WriteTransaction(lock, TestContextManager, exit_fn): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
with WriteTransaction(lock, TestContextManager): pass
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertFalse(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
def test_transaction_with_context_manager_and_exception(self):
class TestContextManager(object):
def __enter__(self):
vals['entered'] = True
def __exit__(self, t, v, tb):
vals['exited'] = True
vals['exception'] = (t or v or tb)
def exit_fn(t, v, tb):
vals['exited_fn'] = True
vals['exception_fn'] = (t or v or tb)
lock = Lock(self.lock_path)
def do_read_with_exception(exit_fn):
with ReadTransaction(lock, TestContextManager, exit_fn):
raise Exception()
def do_write_with_exception(exit_fn):
with WriteTransaction(lock, TestContextManager, exit_fn):
raise Exception()
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
self.assertRaises(Exception, do_read_with_exception, exit_fn)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertTrue(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
self.assertRaises(Exception, do_read_with_exception, None)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
self.assertRaises(Exception, do_write_with_exception, exit_fn)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertTrue(vals['exited_fn'])
self.assertTrue(vals['exception_fn'])
vals = {'entered': False, 'exited': False, 'exited_fn': False,
'exception': False, 'exception_fn': False }
self.assertRaises(Exception, do_write_with_exception, None)
self.assertTrue(vals['entered'])
self.assertTrue(vals['exited'])
self.assertTrue(vals['exception'])
self.assertFalse(vals['exited_fn'])
self.assertFalse(vals['exception_fn'])


@ -95,8 +95,10 @@ def setUp(self):
self._mock_install('mpileaks ^zmpi')
def tearDown(self):
for spec in spack.installed_db.query():
spec.package.do_uninstall(spec)
with spack.installed_db.write_transaction():
for spec in spack.installed_db.query():
spec.package.do_uninstall(spec)
super(MockDatabase, self).tearDown()
shutil.rmtree(self.install_path)
spack.install_path = self.spack_install_path


@ -94,7 +94,3 @@ def test_copy(self):
p = ProviderIndex(spack.repo.all_package_names())
q = p.copy()
self.assertEqual(p, q)
def test_copy(self):
pass