macos: update build process to use spawn instead of fork (#18205)

Spack creates a separate process to do package installation. Different
operating systems and Python versions use different methods to create
it, but up until Python 3.8 both Linux and Mac OS used "fork" (which
duplicates process memory, file descriptor table, etc.).

Python >= 3.8 on Mac OS prefers creating an entirely new process
(referred to as the "spawn" start method) because "fork" was found to
cause issues; in other words, "spawn" is the default start method used
by multiprocessing.Process there. Spack, however, depended on the
particular behavior of "fork" to replicate process memory and transmit
file descriptors.
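
For illustration, a minimal standalone sketch (not part of this change)
showing how the two start methods behave under multiprocessing:

    import multiprocessing

    def work(greeting):
        # With "spawn", this module is re-imported in a fresh interpreter,
        # so `work` must be importable at module scope.
        print(greeting)

    if __name__ == '__main__':
        # 'fork' on Linux; 'spawn' on macOS for Python >= 3.8 and on Windows
        print(multiprocessing.get_start_method())

        # Explicitly request the start method that macOS defaults to:
        ctx = multiprocessing.get_context('spawn')
        p = ctx.Process(target=work, args=('hello from the child',))
        p.start()
        p.join()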

This PR refactors the Spack internals to support starting a child
process with the "spawn" method. To achieve this, it makes the
following changes:

- ensure that the package repository and other global state are
  transmitted to the child process
- ensure that file descriptors are transmitted to the child process in
  a way that works with multiprocessing and spawn
- make all the state needed for the build process and tests picklable
  (package, stage, etc.)
- move a number of locally-defined functions into global scope so that
  they can be pickled (see the sketch after this list)
- rework tests where needed to avoid using local functions
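
The pickling constraint behind the last two items can be seen in a
minimal sketch (illustrative only; local_target and global_target are
hypothetical names, not code from this PR):

    import multiprocessing
    import pickle

    def global_target(msg):
        # Module-level functions pickle as a reference, so they can be
        # passed to a child process started with "spawn".
        print(msg)

    def main():
        def local_target(msg):
            print(msg)

        # Locally-defined functions cannot be pickled:
        # pickle.dumps(local_target)  # raises AttributeError
        pickle.dumps(global_target)   # works

        ctx = multiprocessing.get_context('spawn')
        p = ctx.Process(target=global_target, args=('hello',))
        p.start()
        p.join()

    if __name__ == '__main__':
        main()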

This PR also reworks sbang tests to work on macOS, where temporary
directories are deeper than the Linux sbang limit. We make the limit
platform-dependent (macOS supports 512-character shebangs).
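
A sketch of the platform-dependent limit (this mirrors the sbang change
in the diff below; `script` is a hypothetical path argument):

    import sys

    # macOS allows far longer shebang lines than the 127-character
    # minimum found on Linux.
    shebang_limit = 511 if sys.platform == 'darwin' else 127

    def shebang_too_long(script):
        # True if the script's interpreter line exceeds the OS limit.
        with open(script, 'rb') as f:
            first_line = f.readline()
        return first_line.startswith(b'#!') and len(first_line) > shebang_limit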

See: #14102
Peter Scheibel, 2020-11-12 12:26:23 -08:00 (committed via GitHub)
commit bb42470211, parent 81cab3b271
27 changed files with 1052 additions and 563 deletions


@@ -12,13 +12,16 @@ on:
 jobs:
   build:
     runs-on: macos-latest
+    strategy:
+      matrix:
+        python-version: [3.8, 3.9]
     steps:
     - uses: actions/checkout@v2
       with:
         fetch-depth: 0
     - uses: actions/setup-python@v2
       with:
-        python-version: 3.9
+        python-version: ${{ matrix.python-version }}
     - name: Install Python packages
       run: |
         pip install --upgrade pip six setuptools


@@ -59,6 +59,8 @@ if 'ruamel.yaml' in sys.modules:
 if 'ruamel' in sys.modules:
     del sys.modules['ruamel']
 
-# Once we've set up the system path, run the spack main method
 import spack.main  # noqa
-sys.exit(spack.main.main())
+
+# Once we've set up the system path, run the spack main method
+if __name__ == "__main__":
+    sys.exit(spack.main.main())


@@ -568,6 +568,12 @@ def instance(self):
         return self._instance
 
     def __getattr__(self, name):
+        # When unpickling Singleton objects, the 'instance' attribute may be
+        # requested but not yet set. The final 'getattr' line here requires
+        # 'instance'/'_instance' to be defined or it will enter an infinite
+        # loop, so protect against that here.
+        if name in ['_instance', 'instance']:
+            raise AttributeError()
         return getattr(self.instance, name)
 
     def __getitem__(self, name):
@@ -596,6 +602,8 @@ def __init__(self, ref_function):
         self.ref_function = ref_function
 
     def __getattr__(self, name):
+        if name == 'ref_function':
+            raise AttributeError()
         return getattr(self.ref_function(), name)
 
     def __getitem__(self, name):


@@ -21,7 +21,6 @@
 from six import StringIO
 
 import llnl.util.tty as tty
-from llnl.util.lang import fork_context
 
 try:
     import termios
@@ -237,6 +236,8 @@ def __exit__(self, exc_type, exception, traceback):
         """If termios was available, restore old settings."""
         if self.old_cfg:
             self._restore_default_terminal_settings()
+            if sys.version_info >= (3,):
+                atexit.unregister(self._restore_default_terminal_settings)
 
         # restore SIGSTP and SIGCONT handlers
         if self.old_handlers:
@@ -288,6 +289,109 @@ def _file_descriptors_work(*streams):
     return False
 
 
+class FileWrapper(object):
+    """Represents a file. Can be an open stream, a path to a file (not opened
+    yet), or neither. When unwrapped, it returns an open file (or file-like)
+    object.
+    """
+    def __init__(self, file_like):
+        # This records whether the file-like object returned by "unwrap" is
+        # purely in-memory. In that case a subprocess will need to explicitly
+        # transmit the contents to the parent.
+        self.write_in_parent = False
+
+        self.file_like = file_like
+
+        if isinstance(file_like, string_types):
+            self.open = True
+        elif _file_descriptors_work(file_like):
+            self.open = False
+        else:
+            self.file_like = None
+            self.open = True
+            self.write_in_parent = True
+
+        self.file = None
+
+    def unwrap(self):
+        if self.open:
+            if self.file_like:
+                self.file = open(self.file_like, 'w')
+            else:
+                self.file = StringIO()
+            return self.file
+        else:
+            # We were handed an already-open file object. In this case we also
+            # will not actually close the object when requested to.
+            return self.file_like
+
+    def close(self):
+        if self.file:
+            self.file.close()
+
+
+class MultiProcessFd(object):
+    """Return an object which stores a file descriptor and can be passed as an
+    argument to a function run with ``multiprocessing.Process``, such that
+    the file descriptor is available in the subprocess."""
+    def __init__(self, fd):
+        self._connection = None
+        self._fd = None
+        if sys.version_info >= (3, 8):
+            self._connection = multiprocessing.connection.Connection(fd)
+        else:
+            self._fd = fd
+
+    @property
+    def fd(self):
+        if self._connection:
+            return self._connection._handle
+        else:
+            return self._fd
+
+    def close(self):
+        if self._connection:
+            self._connection.close()
+        else:
+            os.close(self._fd)
+
+
+def close_connection_and_file(multiprocess_fd, file):
+    # MultiprocessFd is intended to transmit a FD
+    # to a child process, this FD is then opened to a Python File object
+    # (using fdopen). In >= 3.8, MultiprocessFd encapsulates a
+    # multiprocessing.connection.Connection; Connection closes the FD
+    # when it is deleted, and prints a warning about duplicate closure if
+    # it is not explicitly closed. In < 3.8, MultiprocessFd encapsulates a
+    # simple FD; closing the FD here appears to conflict with
+    # closure of the File object (in < 3.8 that is). Therefore this needs
+    # to choose whether to close the File or the Connection.
+    if sys.version_info >= (3, 8):
+        multiprocess_fd.close()
+    else:
+        file.close()
+
+
+@contextmanager
+def replace_environment(env):
+    """Replace the current environment (`os.environ`) with `env`.
+
+    If `env` is empty (or None), this unsets all current environment
+    variables.
+    """
+    env = env or {}
+    old_env = os.environ.copy()
+    try:
+        os.environ.clear()
+        for name, val in env.items():
+            os.environ[name] = val
+        yield
+    finally:
+        os.environ.clear()
+        for name, val in old_env.items():
+            os.environ[name] = val
+
+
 class log_output(object):
     """Context manager that logs its output to a file.
 
@@ -324,7 +428,8 @@ class log_output(object):
     work within test frameworks like nose and pytest.
     """
 
-    def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
+    def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
+                 env=None):
         """Create a new output log context manager.
 
         Args:
@@ -352,6 +457,7 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
         self.echo = echo
         self.debug = debug
         self.buffer = buffer
+        self.env = env  # the environment to use for _writer_daemon
 
         self._active = False  # used to prevent re-entry
@@ -393,18 +499,7 @@ def __enter__(self):
                 "file argument must be set by either __init__ or __call__")
 
         # set up a stream for the daemon to write to
-        self.close_log_in_parent = True
-        self.write_log_in_parent = False
-        if isinstance(self.file_like, string_types):
-            self.log_file = open(self.file_like, 'w')
-
-        elif _file_descriptors_work(self.file_like):
-            self.log_file = self.file_like
-            self.close_log_in_parent = False
-
-        else:
-            self.log_file = StringIO()
-            self.write_log_in_parent = True
+        self.log_file = FileWrapper(self.file_like)
 
         # record parent color settings before redirecting.  We do this
         # because color output depends on whether the *original* stdout
@@ -419,6 +514,8 @@ def __enter__(self):
         # OS-level pipe for redirecting output to logger
         read_fd, write_fd = os.pipe()
 
+        read_multiprocess_fd = MultiProcessFd(read_fd)
+
         # Multiprocessing pipe for communication back from the daemon
         # Currently only used to save echo value between uses
         self.parent_pipe, child_pipe = multiprocessing.Pipe()
@@ -427,24 +524,28 @@ def __enter__(self):
         try:
             # need to pass this b/c multiprocessing closes stdin in child.
             try:
-                input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
+                input_multiprocess_fd = MultiProcessFd(
+                    os.dup(sys.stdin.fileno())
+                )
             except BaseException:
-                input_stream = None  # just don't forward input if this fails
+                # just don't forward input if this fails
+                input_multiprocess_fd = None
 
-            self.process = fork_context.Process(
-                target=_writer_daemon,
-                args=(
-                    input_stream, read_fd, write_fd, self.echo, self.log_file,
-                    child_pipe
-                )
-            )
-            self.process.daemon = True  # must set before start()
-            self.process.start()
-            os.close(read_fd)  # close in the parent process
+            with replace_environment(self.env):
+                self.process = multiprocessing.Process(
+                    target=_writer_daemon,
+                    args=(
+                        input_multiprocess_fd, read_multiprocess_fd, write_fd,
+                        self.echo, self.log_file, child_pipe
+                    )
+                )
+                self.process.daemon = True  # must set before start()
+                self.process.start()
 
         finally:
-            if input_stream:
-                input_stream.close()
+            if input_multiprocess_fd:
+                input_multiprocess_fd.close()
+            read_multiprocess_fd.close()
 
         # Flush immediately before redirecting so that anything buffered
         # goes to the original stream
@@ -515,18 +616,21 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         sys.stderr = self._saved_stderr
 
         # print log contents in parent if needed.
-        if self.write_log_in_parent:
+        if self.log_file.write_in_parent:
             string = self.parent_pipe.recv()
             self.file_like.write(string)
 
-        if self.close_log_in_parent:
-            self.log_file.close()
-
         # recover and store echo settings from the child before it dies
-        self.echo = self.parent_pipe.recv()
+        try:
+            self.echo = self.parent_pipe.recv()
+        except EOFError:
+            # This may occur if some exception prematurely terminates the
+            # _writer_daemon. An exception will have already been generated.
+            pass
 
-        # join the daemon process. The daemon will quit automatically
-        # when the write pipe is closed; we just wait for it here.
+        # now that the write pipe is closed (in this __exit__, when we restore
+        # stdout with dup2), the logger daemon process loop will terminate. We
+        # wait for that here.
         self.process.join()
 
         # restore old color and debug settings
@@ -555,7 +659,8 @@ def force_echo(self):
     sys.stdout.flush()
 
 
-def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
+def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
+                   log_file_wrapper, control_pipe):
     """Daemon used by ``log_output`` to write to a log file and to ``stdout``.
 
     The daemon receives output from the parent process and writes it both
@@ -592,26 +697,39 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
         ``StringIO`` in the parent. This is mainly for testing.
 
     Arguments:
-        stdin (stream): input from the terminal
-        read_fd (int): pipe for reading from parent's redirected stdout
-        write_fd (int): parent's end of the pipe will write to (will be
-            immediately closed by the writer daemon)
+        stdin_multiprocess_fd (int): input from the terminal
+        read_multiprocess_fd (int): pipe for reading from parent's redirected
+            stdout
         echo (bool): initial echo setting -- controlled by user and
            preserved across multiple writer daemons
-        log_file (file-like): file to log all output
+        log_file_wrapper (FileWrapper): file to log all output
         control_pipe (Pipe): multiprocessing pipe on which to send control
             information to the parent
 
     """
+    # If this process was forked, then it will inherit file descriptors from
+    # the parent process. This process depends on closing all instances of
+    # write_fd to terminate the reading loop, so we close the file descriptor
+    # here. Forking is the process spawning method everywhere except Mac OS
+    # for Python >= 3.8 and on Windows
+    if sys.version_info < (3, 8) or sys.platform != 'darwin':
+        os.close(write_fd)
+
     # Use line buffering (3rd param = 1) since Python 3 has a bug
     # that prevents unbuffered text I/O.
-    in_pipe = os.fdopen(read_fd, 'r', 1)
-    os.close(write_fd)
+    in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
+
+    if stdin_multiprocess_fd:
+        stdin = os.fdopen(stdin_multiprocess_fd.fd)
+    else:
+        stdin = None
 
     # list of streams to select from
     istreams = [in_pipe, stdin] if stdin else [in_pipe]
     force_echo = False  # parent can force echo for certain output
 
+    log_file = log_file_wrapper.unwrap()
+
     try:
         with keyboard_input(stdin) as kb:
             while True:
@@ -672,10 +790,13 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
     # send written data back to parent if we used a StringIO
     if isinstance(log_file, StringIO):
         control_pipe.send(log_file.getvalue())
-    log_file.close()
+    log_file_wrapper.close()
+    close_connection_and_file(read_multiprocess_fd, in_pipe)
+    if stdin_multiprocess_fd:
+        close_connection_and_file(stdin_multiprocess_fd, stdin)
 
     # send echo value back to the parent so it can be preserved.
     control_pipe.send(echo)
 
 
 def _retry(function):


@@ -24,7 +24,6 @@
 import traceback
 
 import llnl.util.tty.log as log
-from llnl.util.lang import fork_context
 
 from spack.util.executable import which
 
@@ -234,7 +233,7 @@ def start(self, **kwargs):
         ``minion_function``.
         """
-        self.proc = fork_context.Process(
+        self.proc = multiprocessing.Process(
             target=PseudoShell._set_up_and_run_controller_function,
             args=(self.controller_function, self.minion_function,
                   self.controller_timeout, self.sleep_time),


@@ -45,7 +45,8 @@
 import llnl.util.tty as tty
 from llnl.util.tty.color import cescape, colorize
 from llnl.util.filesystem import mkdirp, install, install_tree
-from llnl.util.lang import dedupe, fork_context
+from llnl.util.lang import dedupe
+from llnl.util.tty.log import MultiProcessFd
 
 import spack.build_systems.cmake
 import spack.build_systems.meson
@@ -54,6 +55,7 @@
 import spack.paths
 import spack.schema.environment
 import spack.store
+import spack.subprocess_context
 import spack.architecture as arch
 from spack.util.string import plural
 from spack.util.environment import (
@@ -803,13 +805,72 @@ def modifications_from_dependencies(spec, context):
     return env
 
 
-def fork(pkg, function, dirty, fake):
-    """Fork a child process to do part of a spack build.
+def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
+                       input_multiprocess_fd):
+    try:
+        # We are in the child process. Python sets sys.stdin to
+        # open(os.devnull) to prevent our process and its parent from
+        # simultaneously reading from the original stdin. But, we assume
+        # that the parent process is not going to read from it till we
+        # are done with the child, so we undo Python's precaution.
+        if input_multiprocess_fd is not None:
+            sys.stdin = os.fdopen(input_multiprocess_fd.fd)
+
+        pkg = serialized_pkg.restore()
+
+        if not kwargs.get('fake', False):
+            kwargs['unmodified_env'] = os.environ.copy()
+            setup_package(pkg, dirty=kwargs.get('dirty', False))
+        return_value = function(pkg, kwargs)
+        child_pipe.send(return_value)
+    except StopPhase as e:
+        # Do not create a full ChildError from this, it's not an error
+        # it's a control statement.
+        child_pipe.send(e)
+    except BaseException:
+        # catch ANYTHING that goes wrong in the child process
+        exc_type, exc, tb = sys.exc_info()
+
+        # Need to unwind the traceback in the child because traceback
+        # objects can't be sent to the parent.
+        tb_string = traceback.format_exc()
+
+        # build up some context from the offending package so we can
+        # show that, too.
+        package_context = get_package_context(tb)
+
+        build_log = None
+        try:
+            if hasattr(pkg, 'log_path'):
+                build_log = pkg.log_path
+        except NameError:
+            # 'pkg' is not defined yet
+            pass
+
+        # make a pickleable exception to send to parent.
+        msg = "%s: %s" % (exc_type.__name__, str(exc))
+
+        ce = ChildError(msg,
+                        exc_type.__module__,
+                        exc_type.__name__,
+                        tb_string, build_log, package_context)
+        child_pipe.send(ce)
+    finally:
+        child_pipe.close()
+        if input_multiprocess_fd is not None:
+            input_multiprocess_fd.close()
+
+
+def start_build_process(pkg, function, kwargs):
+    """Create a child process to do part of a spack build.
 
     Args:
 
         pkg (PackageBase): package whose environment we should set up the
-            forked process for.
+            child process for.
         function (callable): argless function to run in the child
             process.
         dirty (bool): If True, do NOT clean the environment before
@@ -820,9 +881,9 @@ def fork(pkg, function, dirty, fake):
         def child_fun():
             # do stuff
-        build_env.fork(pkg, child_fun)
+        build_env.start_build_process(pkg, child_fun)
 
-    Forked processes are run with the build environment set up by
+    The child process is run with the build environment set up by
     spack.build_environment. This allows package authors to have full
     control over the environment, etc. without affecting other builds
     that might be executed in the same spack call.
@@ -830,64 +891,36 @@ def child_fun():
     If something goes wrong, the child process catches the error and
     passes it to the parent wrapped in a ChildError. The parent is
     expected to handle (or re-raise) the ChildError.
+
+    This uses `multiprocessing.Process` to create the child process. The
+    mechanism used to create the process differs on different operating
+    systems and for different versions of Python. In some cases "fork"
+    is used (i.e. the "fork" system call) and some cases it starts an
+    entirely new Python interpreter process (in the docs this is referred
+    to as the "spawn" start method). Breaking it down by OS:
+
+    - Linux always uses fork.
+    - Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
+    - Windows always uses the "spawn" start method.
+
+    For more information on `multiprocessing` child process creation
+    mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
     """
-    def child_process(child_pipe, input_stream):
-        # We are in the child process. Python sets sys.stdin to
-        # open(os.devnull) to prevent our process and its parent from
-        # simultaneously reading from the original stdin. But, we assume
-        # that the parent process is not going to read from it till we
-        # are done with the child, so we undo Python's precaution.
-        if input_stream is not None:
-            sys.stdin = input_stream
-
-        try:
-            if not fake:
-                setup_package(pkg, dirty=dirty)
-            return_value = function()
-            child_pipe.send(return_value)
-        except StopPhase as e:
-            # Do not create a full ChildError from this, it's not an error
-            # it's a control statement.
-            child_pipe.send(e)
-        except BaseException:
-            # catch ANYTHING that goes wrong in the child process
-            exc_type, exc, tb = sys.exc_info()
-
-            # Need to unwind the traceback in the child because traceback
-            # objects can't be sent to the parent.
-            tb_string = traceback.format_exc()
-
-            # build up some context from the offending package so we can
-            # show that, too.
-            package_context = get_package_context(tb)
-
-            build_log = None
-            if hasattr(pkg, 'log_path'):
-                build_log = pkg.log_path
-
-            # make a pickleable exception to send to parent.
-            msg = "%s: %s" % (exc_type.__name__, str(exc))
-
-            ce = ChildError(msg,
-                            exc_type.__module__,
-                            exc_type.__name__,
-                            tb_string, build_log, package_context)
-            child_pipe.send(ce)
-        finally:
-            child_pipe.close()
-
     parent_pipe, child_pipe = multiprocessing.Pipe()
-    input_stream = None
+    input_multiprocess_fd = None
+
+    serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
+
     try:
         # Forward sys.stdin when appropriate, to allow toggling verbosity
         if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
-            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
+            input_fd = os.dup(sys.stdin.fileno())
+            input_multiprocess_fd = MultiProcessFd(input_fd)
 
-        p = fork_context.Process(
-            target=child_process, args=(child_pipe, input_stream))
+        p = multiprocessing.Process(
+            target=_setup_pkg_and_run,
+            args=(serialized_pkg, function, kwargs, child_pipe,
+                  input_multiprocess_fd))
         p.start()
 
     except InstallError as e:
@@ -896,8 +929,8 @@ def child_process(child_pipe, input_stream):
     finally:
         # Close the input stream in the parent process
-        if input_stream is not None:
-            input_stream.close()
+        if input_multiprocess_fd is not None:
+            input_multiprocess_fd.close()
 
     child_result = parent_pipe.recv()
     p.join()


@@ -214,14 +214,17 @@ def mirror_id(self):
         """BundlePackages don't have a mirror id."""
 
 
-@pattern.composite(interface=FetchStrategy)
-class FetchStrategyComposite(object):
+class FetchStrategyComposite(pattern.Composite):
     """Composite for a FetchStrategy object.
-
-    Implements the GoF composite pattern.
     """
     matches = FetchStrategy.matches
 
+    def __init__(self):
+        super(FetchStrategyComposite, self).__init__([
+            'fetch', 'check', 'expand', 'reset', 'archive', 'cachable',
+            'mirror_id'
+        ])
+
     def source_id(self):
         component_ids = tuple(i.source_id() for i in self)
         if all(component_ids):


@@ -17,9 +17,13 @@
 
 import spack.store
 
-#: Character limit for shebang line.  Using Linux's 127 characters
-#: here, as it is the shortest I could find on a modern OS.
-shebang_limit = 127
+#: OS-imposed character limit for shebang line: 127 for Linux; 511 for Mac.
+#: Different Linux distributions have different limits, but 127 is the
+#: smallest among all modern versions.
+if sys.platform == 'darwin':
+    shebang_limit = 511
+else:
+    shebang_limit = 127
 
 
 def sbang_install_path():


@@ -1056,15 +1056,9 @@ def _install_task(self, task, **kwargs):
             task (BuildTask): the installation build task for a package"""
 
         cache_only = kwargs.get('cache_only', False)
-        dirty = kwargs.get('dirty', False)
-        fake = kwargs.get('fake', False)
-        install_source = kwargs.get('install_source', False)
-        keep_stage = kwargs.get('keep_stage', False)
-        skip_patch = kwargs.get('skip_patch', False)
         tests = kwargs.get('tests', False)
         unsigned = kwargs.get('unsigned', False)
         use_cache = kwargs.get('use_cache', True)
-        verbose = kwargs.get('verbose', False)
         full_hash_match = kwargs.get('full_hash_match', False)
 
         pkg = task.pkg
@@ -1087,110 +1081,6 @@ def _install_task(self, task, **kwargs):
         pkg.run_tests = (tests is True or tests and pkg.name in tests)
 
-        pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
-        pre = '{0}{1}:'.format(pid, pkg.name)
-
-        def build_process():
-            """
-            This function implements the process forked for each build.
-
-            It has its own process and python module space set up by
-            build_environment.fork().
-
-            This function's return value is returned to the parent process.
-            """
-
-            start_time = time.time()
-            if not fake:
-                if not skip_patch:
-                    pkg.do_patch()
-                else:
-                    pkg.do_stage()
-
-            pkg_id = package_id(pkg)
-            tty.debug('{0} Building {1} [{2}]'
-                      .format(pre, pkg_id, pkg.build_system_class))
-
-            # get verbosity from do_install() parameter or saved value
-            echo = verbose
-            if spack.package.PackageBase._verbose is not None:
-                echo = spack.package.PackageBase._verbose
-
-            pkg.stage.keep = keep_stage
-
-            # parent process already has a prefix write lock
-            with pkg.stage:
-                # Run the pre-install hook in the child process after
-                # the directory is created.
-                spack.hooks.pre_install(pkg.spec)
-                if fake:
-                    _do_fake_install(pkg)
-                else:
-                    source_path = pkg.stage.source_path
-                    if install_source and os.path.isdir(source_path):
-                        src_target = os.path.join(pkg.spec.prefix, 'share',
-                                                  pkg.name, 'src')
-                        tty.debug('{0} Copying source to {1}'
-                                  .format(pre, src_target))
-                        fs.install_tree(pkg.stage.source_path, src_target)
-
-                    # Do the real install in the source directory.
-                    with fs.working_dir(pkg.stage.source_path):
-                        # Save the build environment in a file before building.
-                        dump_environment(pkg.env_path)
-
-                        for attr in ('configure_args', 'cmake_args'):
-                            try:
-                                configure_args = getattr(pkg, attr)()
-                                configure_args = ' '.join(configure_args)
-
-                                with open(pkg.configure_args_path, 'w') as \
-                                        args_file:
-                                    args_file.write(configure_args)
-
-                                break
-                            except Exception:
-                                pass
-
-                        # cache debug settings
-                        debug_level = tty.debug_level()
-
-                        # Spawn a daemon that reads from a pipe and redirects
-                        # everything to log_path
-                        with log_output(pkg.log_path, echo, True) as logger:
-                            for phase_name, phase_attr in zip(
-                                    pkg.phases, pkg._InstallPhase_phases):
-                                with logger.force_echo():
-                                    inner_debug_level = tty.debug_level()
-                                    tty.set_debug(debug_level)
-                                    tty.msg("{0} Executing phase: '{1}'"
-                                            .format(pre, phase_name))
-                                    tty.set_debug(inner_debug_level)
-
-                                # Redirect stdout and stderr to daemon pipe
-                                phase = getattr(pkg, phase_attr)
-                                phase(pkg.spec, pkg.prefix)
-
-                            echo = logger.echo
-                            log(pkg)
-
-                # Run post install hooks before build stage is removed.
-                spack.hooks.post_install(pkg.spec)
-
-            # Stop the timer
-            pkg._total_time = time.time() - start_time
-            build_time = pkg._total_time - pkg._fetch_time
-
-            tty.debug('{0} Successfully installed {1}'
-                      .format(pre, pkg_id),
-                      'Fetch: {0}.  Build: {1}.  Total: {2}.'
-                      .format(_hms(pkg._fetch_time), _hms(build_time),
-                              _hms(pkg._total_time)))
-            _print_installed_pkg(pkg.prefix)
-
-            # preserve verbosity across runs
-            return echo
-
         # hook that allows tests to inspect the Package before installation
         # see unit_test_check() docs.
         if not pkg.unit_test_check():
@@ -1199,10 +1089,12 @@ def build_process():
         try:
             self._setup_install_dir(pkg)
 
-            # Fork a child to do the actual installation.
+            # Create a child process to do the actual installation.
             # Preserve verbosity settings across installs.
-            spack.package.PackageBase._verbose = spack.build_environment.fork(
-                pkg, build_process, dirty=dirty, fake=fake)
+            spack.package.PackageBase._verbose = (
+                spack.build_environment.start_build_process(
+                    pkg, build_process, kwargs)
+            )
 
             # Note: PARENT of the build process adds the new package to
             # the database, so that we don't need to re-read from file.
@@ -1216,9 +1108,9 @@ def build_process():
         except spack.build_environment.StopPhase as e:
             # A StopPhase exception means that do_install was asked to
             # stop early from clients, and is not an error at this point
-            pre = '{0}'.format(self.pid) if tty.show_pid() else ''
+            pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
             tty.debug('{0}{1}'.format(pid, str(e)))
-            tty.debug('Package stage directory : {0}'
+            tty.debug('Package stage directory: {0}'
                       .format(pkg.stage.source_path))
 
     _install_task.__doc__ += install_args_docstring
@@ -1666,6 +1558,117 @@ def spec(self):
         return self.pkg.spec
 
 
+def build_process(pkg, kwargs):
+    """Perform the installation/build of the package.
+
+    This runs in a separate child process, and has its own process and
+    python module space set up by build_environment.start_build_process().
+
+    This function's return value is returned to the parent process.
+    """
+    keep_stage = kwargs.get('keep_stage', False)
+    install_source = kwargs.get('install_source', False)
+    skip_patch = kwargs.get('skip_patch', False)
+    verbose = kwargs.get('verbose', False)
+    fake = kwargs.get('fake', False)
+    unmodified_env = kwargs.get('unmodified_env', {})
+
+    start_time = time.time()
+    if not fake:
+        if not skip_patch:
+            pkg.do_patch()
+        else:
+            pkg.do_stage()
+
+    pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
+    pre = '{0}{1}:'.format(pid, pkg.name)
+    pkg_id = package_id(pkg)
+
+    tty.debug('{0} Building {1} [{2}]'
+              .format(pre, pkg_id, pkg.build_system_class))
+
+    # get verbosity from do_install() parameter or saved value
+    echo = verbose
+    if spack.package.PackageBase._verbose is not None:
+        echo = spack.package.PackageBase._verbose
+
+    pkg.stage.keep = keep_stage
+    with pkg.stage:
+        # Run the pre-install hook in the child process after
+        # the directory is created.
+        spack.hooks.pre_install(pkg.spec)
+        if fake:
+            _do_fake_install(pkg)
+        else:
+            source_path = pkg.stage.source_path
+            if install_source and os.path.isdir(source_path):
+                src_target = os.path.join(pkg.spec.prefix, 'share',
+                                          pkg.name, 'src')
+                tty.debug('{0} Copying source to {1}'
+                          .format(pre, src_target))
+                fs.install_tree(pkg.stage.source_path, src_target)
+
+            # Do the real install in the source directory.
+            with fs.working_dir(pkg.stage.source_path):
+                # Save the build environment in a file before building.
+                dump_environment(pkg.env_path)
+
+                for attr in ('configure_args', 'cmake_args'):
+                    try:
+                        configure_args = getattr(pkg, attr)()
+                        configure_args = ' '.join(configure_args)
+
+                        with open(pkg.configure_args_path, 'w') as \
+                                args_file:
+                            args_file.write(configure_args)
+
+                        break
+                    except Exception:
+                        pass
+
+                # cache debug settings
+                debug_level = tty.debug_level()
+
+                # Spawn a daemon that reads from a pipe and redirects
+                # everything to log_path
+                with log_output(pkg.log_path, echo, True,
+                                env=unmodified_env) as logger:
+                    for phase_name, phase_attr in zip(
+                            pkg.phases, pkg._InstallPhase_phases):
+                        with logger.force_echo():
+                            inner_debug_level = tty.debug_level()
+                            tty.set_debug(debug_level)
+                            tty.msg("{0} Executing phase: '{1}'"
+                                    .format(pre, phase_name))
+                            tty.set_debug(inner_debug_level)
+
+                        # Redirect stdout and stderr to daemon pipe
+                        phase = getattr(pkg, phase_attr)
+                        phase(pkg.spec, pkg.prefix)
+
+                    echo = logger.echo
+                    log(pkg)
+
+        # Run post install hooks before build stage is removed.
+        spack.hooks.post_install(pkg.spec)
+
+    # Stop the timer
+    pkg._total_time = time.time() - start_time
+    build_time = pkg._total_time - pkg._fetch_time
+
+    tty.debug('{0} Successfully installed {1}'
+              .format(pre, pkg_id),
+              'Fetch: {0}.  Build: {1}.  Total: {2}.'
+              .format(_hms(pkg._fetch_time), _hms(build_time),
+                      _hms(pkg._total_time)))
+    _print_installed_pkg(pkg.prefix)
+
+    # preserve verbosity across runs
+    return echo
+
+
 class BuildTask(object):
     """Class for representing the build task for a package."""


@@ -901,6 +901,10 @@ def _make_resource_stage(self, root_stage, fetcher, resource):
                       path=self.path)
         return stage
 
+    def _download_search(self):
+        dynamic_fetcher = fs.from_list_url(self)
+        return [dynamic_fetcher] if dynamic_fetcher else []
+
     def _make_root_stage(self, fetcher):
         # Construct a mirror path (TODO: get this out of package.py)
         mirror_paths = spack.mirror.mirror_archive_paths(
@@ -912,12 +916,8 @@ def _make_root_stage(self, fetcher):
         stage_name = "{0}{1}-{2}-{3}".format(stage_prefix, s.name, s.version,
                                              s.dag_hash())
 
-        def download_search():
-            dynamic_fetcher = fs.from_list_url(self)
-            return [dynamic_fetcher] if dynamic_fetcher else []
-
         stage = Stage(fetcher, mirror_paths=mirror_paths, name=stage_name,
-                      path=self.path, search_fn=download_search)
+                      path=self.path, search_fn=self._download_search)
         return stage
 
     def _make_stage(self):


@@ -1164,14 +1164,14 @@ def create_or_construct(path, namespace=None):
     return Repo(path)
 
 
-def _path():
+def _path(repo_dirs=None):
     """Get the singleton RepoPath instance for Spack.
 
     Create a RepoPath, add it to sys.meta_path, and return it.
 
    TODO: consider not making this a singleton.
    """
-    repo_dirs = spack.config.get('repos')
+    repo_dirs = repo_dirs or spack.config.get('repos')
     if not repo_dirs:
         raise NoRepoConfiguredError(
             "Spack configuration contains no package repositories.")


@@ -919,6 +919,12 @@ def __set__(self, instance, value):
         raise AttributeError(msg.format(cls_name, self.attribute_name))
 
 
+# Represents a query state in a BuildInterface object
+QueryState = collections.namedtuple(
+    'QueryState', ['name', 'extra_parameters', 'isvirtual']
+)
+
+
 class SpecBuildInterface(lang.ObjectWrapper):
     command = ForwardQueryToPackage(
         'command',
@@ -938,11 +944,6 @@ class SpecBuildInterface(lang.ObjectWrapper):
     def __init__(self, spec, name, query_parameters):
         super(SpecBuildInterface, self).__init__(spec)
 
-        # Represents a query state in a BuildInterface object
-        QueryState = collections.namedtuple(
-            'QueryState', ['name', 'extra_parameters', 'isvirtual']
-        )
-
         is_virtual = Spec.is_virtual(name)
         self.last_query = QueryState(
             name=name,


@@ -690,11 +690,7 @@ def _add_to_root_stage(self):
         install(src, destination_path)
 
 
-@pattern.composite(method_list=[
-    'fetch', 'create', 'created', 'check', 'expand_archive', 'restage',
-    'destroy', 'cache_local', 'cache_mirror', 'steal_source',
-    'managed_by_spack'])
-class StageComposite:
+class StageComposite(pattern.Composite):
     """Composite for Stage type objects. The first item in this composite is
     considered to be the root package, and operations that return a value are
     forwarded to it."""
@@ -702,6 +698,12 @@ class StageComposite:
     #
     # __enter__ and __exit__ delegate to all stages in the composite.
     #
+
+    def __init__(self):
+        super(StageComposite, self).__init__([
+            'fetch', 'create', 'created', 'check', 'expand_archive', 'restage',
+            'destroy', 'cache_local', 'cache_mirror', 'steal_source',
+            'managed_by_spack'])
+
     def __enter__(self):
         for item in self:
             item.__enter__()


@@ -103,10 +103,23 @@ def _store():
 #: Singleton store instance
 store = llnl.util.lang.Singleton(_store)
 
+
+def _store_root():
+    return store.root
+
+
+def _store_db():
+    return store.db
+
+
+def _store_layout():
+    return store.layout
+
+
 # convenience accessors for parts of the singleton store
-root = llnl.util.lang.LazyReference(lambda: store.root)
-db = llnl.util.lang.LazyReference(lambda: store.db)
-layout = llnl.util.lang.LazyReference(lambda: store.layout)
+root = llnl.util.lang.LazyReference(_store_root)
+db = llnl.util.lang.LazyReference(_store_db)
+layout = llnl.util.lang.LazyReference(_store_layout)
 
 
 def retrieve_upstream_dbs():


@@ -0,0 +1,149 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
+# Spack Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: (Apache-2.0 OR MIT)
+
+"""
+This module handles transmission of Spack state to child processes started
+using the 'spawn' start method. Notably, installations are performed in a
+subprocess and require transmitting the Package object (in such a way
+that the repository is available for importing when it is deserialized);
+installations performed in Spack unit tests may include additional
+modifications to global state in memory that must be replicated in the
+child process.
+"""
+
+from types import ModuleType
+import pickle
+import pydoc
+import io
+import sys
+import multiprocessing
+
+import spack.architecture
+import spack.config
+
+
+_serialize = sys.version_info >= (3, 8) and sys.platform == 'darwin'
+
+
+patches = None
+
+
+def append_patch(patch):
+    global patches
+    if not patches:
+        patches = list()
+    patches.append(patch)
+
+
+def serialize(obj):
+    serialized_obj = io.BytesIO()
+    pickle.dump(obj, serialized_obj)
+    serialized_obj.seek(0)
+    return serialized_obj
+
+
+class SpackTestProcess(object):
+    def __init__(self, fn):
+        self.fn = fn
+
+    def _restore_and_run(self, fn, test_state):
+        test_state.restore()
+        fn()
+
+    def create(self):
+        test_state = TestState()
+        return multiprocessing.Process(
+            target=self._restore_and_run,
+            args=(self.fn, test_state))
+
+
+class PackageInstallContext(object):
+    """Captures the in-memory process state of a package installation that
+    needs to be transmitted to a child process.
+    """
+    def __init__(self, pkg):
+        if _serialize:
+            self.serialized_pkg = serialize(pkg)
+        else:
+            self.pkg = pkg
+        self.test_state = TestState()
+
+    def restore(self):
+        self.test_state.restore()
+        if _serialize:
+            return pickle.load(self.serialized_pkg)
+        else:
+            return self.pkg
+
+
+class TestState(object):
+    """Spack tests may modify state that is normally read from disk in memory;
+    this object is responsible for properly serializing that state to be
+    applied to a subprocess. This isn't needed outside of a testing environment
+    but this logic is designed to behave the same inside or outside of tests.
+    """
+    def __init__(self):
+        if _serialize:
+            self.repo_dirs = list(r.root for r in spack.repo.path.repos)
+            self.config = spack.config.config
+            self.platform = spack.architecture.platform
+            self.test_patches = store_patches()
+            # TODO: transfer spack.store.store? note that you should not
+            # transfer spack.store.store and spack.store.db: 'db' is a
+            # shortcut that accesses the store (so transferring both can
+            # create an inconsistency). Some tests set 'db' directly, and
+            # others set 'store'
+
+    def restore(self):
+        if _serialize:
+            spack.repo.path = spack.repo._path(self.repo_dirs)
+            spack.config.config = self.config
+            spack.architecture.platform = self.platform
+            self.test_patches.restore()
+
+
+class TestPatches(object):
+    def __init__(self, module_patches, class_patches):
+        self.module_patches = list(
+            (x, y, serialize(z)) for (x, y, z) in module_patches)
+        self.class_patches = list(
+            (x, y, serialize(z)) for (x, y, z) in class_patches)
+
+    def restore(self):
+        for module_name, attr_name, value in self.module_patches:
+            value = pickle.load(value)
+            module = __import__(module_name)
+            setattr(module, attr_name, value)
+        for class_fqn, attr_name, value in self.class_patches:
+            value = pickle.load(value)
+            cls = pydoc.locate(class_fqn)
+            setattr(cls, attr_name, value)
+
+
+def store_patches():
+    global patches
+    module_patches = list()
+    class_patches = list()
+    if not patches:
+        return TestPatches(list(), list())
+    for patch in patches:
+        for target, name, _ in patches:
+            if isinstance(target, ModuleType):
+                new_val = getattr(target, name)
+                module_name = target.__name__
+                module_patches.append((module_name, name, new_val))
+            elif isinstance(target, type):
+                new_val = getattr(target, name)
+                class_fqn = '.'.join([target.__module__, target.__name__])
+                class_patches.append((class_fqn, name, new_val))
+
+    return TestPatches(module_patches, class_patches)
+
+
+def clear_patches():
+    global patches
+    patches = None


@@ -149,9 +149,11 @@ def install_dir_default_layout(tmpdir):
     spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
     spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                              path_scheme=def_install_path_scheme)  # noqa: E501
-    yield spack.store
-    spack.store.store = real_store
-    spack.store.layout = real_layout
+    try:
+        yield spack.store
+    finally:
+        spack.store.store = real_store
+        spack.store.layout = real_layout
 
 
 @pytest.fixture(scope='function')
@@ -162,9 +164,11 @@ def install_dir_non_default_layout(tmpdir):
     spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
     spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                              path_scheme=ndef_install_path_scheme)  # noqa: E501
-    yield spack.store
-    spack.store.store = real_store
-    spack.store.layout = real_layout
+    try:
+        yield spack.store
+    finally:
+        spack.store.store = real_store
+        spack.store.layout = real_layout
 
 
 args = ['strings', 'file']
@@ -582,6 +586,13 @@ def test_built_spec_cache(tmpdir,
         mirror.mirror(mparser, margs)
 
 
+def fake_full_hash(spec):
+    # Generate an arbitrary hash that is intended to be different than
+    # whatever a Spec reported before (to test actions that trigger when
+    # the hash changes)
+    return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
+
+
 def test_spec_needs_rebuild(install_mockery_mutable_config, mock_packages,
                             mock_fetch, monkeypatch, tmpdir):
     """Make sure needs_rebuild properly compares remote full_hash
@@ -606,9 +617,6 @@ def test_spec_needs_rebuild(install_mockery_mutable_config, mock_packages,
     assert not rebuild
 
     # Now monkey patch Spec to change the full hash on the package
-    def fake_full_hash(spec):
-        print('fake_full_hash')
-        return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
     monkeypatch.setattr(spack.spec.Spec, 'full_hash', fake_full_hash)
 
     rebuild = bindist.needs_rebuild(s, mirror_url, rebuild_on_errors=True)


@@ -108,20 +108,24 @@ def test_dev_build_before_until(tmpdir, mock_packages, install_mockery):
                           'dev-build-test-install@0.0.0')
 
 
+def print_spack_cc(*args):
+    # Eat arguments and print environment variable to test
+    print(os.environ.get('CC', ''))
+
+
+# `module unload cray-libsci` in test environment causes failure
+# It does not fail for actual installs
+# build_environment.py imports module directly, so we monkeypatch it there
+# rather than in module_cmd
+def mock_module_noop(*args):
+    pass
+
+
 def test_dev_build_drop_in(tmpdir, mock_packages, monkeypatch,
                            install_mockery):
-    def print_spack_cc(*args):
-        # Eat arguments and print environment variable to test
-        print(os.environ.get('CC', ''))
     monkeypatch.setattr(os, 'execvp', print_spack_cc)
 
-    # `module unload cray-libsci` in test environment causes failure
-    # It does not fail for actual installs
-    # build_environment.py imports module directly, so we monkeypatch it there
-    # rather than in module_cmd
-    def module(*args):
-        pass
-    monkeypatch.setattr(spack.build_environment, 'module', module)
+    monkeypatch.setattr(spack.build_environment, 'module', mock_module_noop)
 
     with tmpdir.as_cwd():
         output = dev_build('-b', 'edit', '--drop-in', 'sh',


@@ -811,6 +811,13 @@ def test_install_fails_no_args_suggests_env_activation(tmpdir):
     assert 'using the `spack.yaml` in this directory' in output
 
 
+def fake_full_hash(spec):
+    # Generate an arbitrary hash that is intended to be different than
+    # whatever a Spec reported before (to test actions that trigger when
+    # the hash changes)
+    return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
+
+
 def test_cache_install_full_hash_match(
         install_mockery_mutable_config, mock_packages, mock_fetch,
         mock_archive, mutable_config, monkeypatch, tmpdir):
@@ -843,9 +850,6 @@ def test_cache_install_full_hash_match(
     uninstall('-y', s.name)
 
     # Now monkey patch Spec to change the full hash on the package
-    def fake_full_hash(spec):
-        print('fake_full_hash')
-        return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
     monkeypatch.setattr(spack.spec.Spec, 'full_hash', fake_full_hash)
 
     # Check that even if the full hash changes, we install from binary when


@ -34,6 +34,7 @@
import spack.stage import spack.stage
import spack.util.executable import spack.util.executable
import spack.util.gpg import spack.util.gpg
import spack.subprocess_context
import spack.util.spack_yaml as syaml import spack.util.spack_yaml as syaml
from spack.util.pattern import Bunch from spack.util.pattern import Bunch
@ -41,11 +42,34 @@
from spack.fetch_strategy import FetchError from spack.fetch_strategy import FetchError
@pytest.fixture(autouse=True)
def clear_recorded_monkeypatches():
yield
spack.subprocess_context.clear_patches()
@pytest.fixture(scope='session', autouse=True)
def record_monkeypatch_setattr():
import _pytest
saved_setattr = _pytest.monkeypatch.MonkeyPatch.setattr
def record_setattr(cls, target, name, value, *args, **kwargs):
spack.subprocess_context.append_patch((target, name, value))
saved_setattr(cls, target, name, value, *args, **kwargs)
_pytest.monkeypatch.MonkeyPatch.setattr = record_setattr
try:
yield
finally:
_pytest.monkeypatch.MonkeyPatch.setattr = saved_setattr
def _can_access(path, perms):
return False
@pytest.fixture @pytest.fixture
def no_path_access(monkeypatch): def no_path_access(monkeypatch):
def _can_access(path, perms):
return False
monkeypatch.setattr(os, 'access', _can_access) monkeypatch.setattr(os, 'access', _can_access)
@ -70,6 +94,10 @@ def clean_user_environment():
ev.activate(active) ev.activate(active)
def _verify_executables_noop(*args):
return None
# #
# Disable checks on compiler executable existence # Disable checks on compiler executable existence
# #
@ -86,7 +114,7 @@ def mock_compiler_executable_verification(request, monkeypatch):
if 'enable_compiler_verification' not in request.keywords: if 'enable_compiler_verification' not in request.keywords:
monkeypatch.setattr(spack.compiler.Compiler, monkeypatch.setattr(spack.compiler.Compiler,
'verify_executables', 'verify_executables',
lambda x: None) _verify_executables_noop)
# Hooks to add command line options or set other custom behaviors. # Hooks to add command line options or set other custom behaviors.
@ -245,25 +273,27 @@ def check_for_leftover_stage_files(request, mock_stage, ignore_stage_files):
assert not files_in_stage assert not files_in_stage
class MockCache(object):
def store(self, copy_cmd, relative_dest):
pass
def fetcher(self, target_path, digest, **kwargs):
return MockCacheFetcher()
class MockCacheFetcher(object):
def fetch(self):
raise FetchError('Mock cache always fails for tests')
def __str__(self):
return "[mock fetch cache]"
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def mock_fetch_cache(monkeypatch): def mock_fetch_cache(monkeypatch):
"""Substitutes spack.paths.fetch_cache with a mock object that does nothing """Substitutes spack.paths.fetch_cache with a mock object that does nothing
and raises on fetch. and raises on fetch.
""" """
class MockCache(object):
def store(self, copy_cmd, relative_dest):
pass
def fetcher(self, target_path, digest, **kwargs):
return MockCacheFetcher()
class MockCacheFetcher(object):
def fetch(self):
raise FetchError('Mock cache always fails for tests')
def __str__(self):
return "[mock fetch cache]"
monkeypatch.setattr(spack.caches, 'fetch_cache', MockCache()) monkeypatch.setattr(spack.caches, 'fetch_cache', MockCache())
@ -286,7 +316,15 @@ def _skip_if_missing_executables(request):
# FIXME: session-scope. Anyhow doing it is not easy, as it seems # FIXME: session-scope. Anyhow doing it is not easy, as it seems
# FIXME: there's some weird interaction with compilers during concretization. # FIXME: there's some weird interaction with compilers during concretization.
spack.architecture.real_platform = spack.architecture.platform spack.architecture.real_platform = spack.architecture.platform
spack.architecture.platform = lambda: spack.platforms.test.Test()
def test_platform():
return spack.platforms.test.Test()
spack.architecture.platform = test_platform
# FIXME: Since we change the architecture above, we have to (re)initialize # FIXME: Since we change the architecture above, we have to (re)initialize
# FIXME: the config singleton. If it gets initialized too early with the # FIXME: the config singleton. If it gets initialized too early with the
# FIXME: actual architecture, tests will fail. # FIXME: actual architecture, tests will fail.
@ -356,14 +394,15 @@ def mock_repo_path():
yield spack.repo.RepoPath(spack.paths.mock_packages_path) yield spack.repo.RepoPath(spack.paths.mock_packages_path)
def _pkg_install_fn(pkg, spec, prefix):
# sanity_check_prefix requires something in the install directory
mkdirp(prefix.bin)
@pytest.fixture @pytest.fixture
def mock_pkg_install(monkeypatch): def mock_pkg_install(monkeypatch):
def _pkg_install_fn(pkg, spec, prefix): monkeypatch.setattr(spack.package.PackageBase, 'install',
# sanity_check_prefix requires something in the install directory _pkg_install_fn, raising=False)
mkdirp(prefix.bin)
monkeypatch.setattr(spack.package.PackageBase, 'install', _pkg_install_fn,
raising=False)
@pytest.fixture(scope='function') @pytest.fixture(scope='function')
@ -580,10 +619,10 @@ def mock_store(tmpdir_factory, mock_repo_path, mock_configuration,
@pytest.fixture(scope='function') @pytest.fixture(scope='function')
def database(mock_store, mock_packages, config): def database(mock_store, mock_packages, config, monkeypatch):
"""This activates the mock store, packages, AND config.""" """This activates the mock store, packages, AND config."""
with use_store(mock_store): monkeypatch.setattr(spack.store, 'store', mock_store)
yield mock_store.db yield mock_store.db
# Force reading the database again between tests # Force reading the database again between tests
mock_store.db.last_seen_verifier = '' mock_store.db.last_seen_verifier = ''
@ -631,6 +670,10 @@ def dirs_with_libfiles(tmpdir_factory):
yield lib_to_dirs, all_dirs yield lib_to_dirs, all_dirs
def _compiler_link_paths_noop(*args):
return []
@pytest.fixture(scope='function', autouse=True) @pytest.fixture(scope='function', autouse=True)
def disable_compiler_execution(monkeypatch, request): def disable_compiler_execution(monkeypatch, request):
""" """
@ -641,31 +684,27 @@ def disable_compiler_execution(monkeypatch, request):
If a test is marked in that way this is a no-op.""" If a test is marked in that way this is a no-op."""
if 'enable_compiler_link_paths' not in request.keywords: if 'enable_compiler_link_paths' not in request.keywords:
def noop(*args):
return []
# Compiler.determine_implicit_rpaths actually runs the compiler. So # Compiler.determine_implicit_rpaths actually runs the compiler. So
# replace that function with a noop that simulates finding no implicit # replace that function with a noop that simulates finding no implicit
# RPATHs # RPATHs
monkeypatch.setattr( monkeypatch.setattr(
spack.compiler.Compiler, spack.compiler.Compiler,
'_get_compiler_link_paths', '_get_compiler_link_paths',
noop _compiler_link_paths_noop
) )
@pytest.fixture(scope='function') @pytest.fixture(scope='function')
def install_mockery(tmpdir, config, mock_packages, monkeypatch): def install_mockery(tmpdir, config, mock_packages, monkeypatch):
"""Hooks a fake install directory, DB, and stage directory into Spack.""" """Hooks a fake install directory, DB, and stage directory into Spack."""
real_store = spack.store.store monkeypatch.setattr(
spack.store.store = spack.store.Store(str(tmpdir.join('opt'))) spack.store, 'store', spack.store.Store(str(tmpdir.join('opt'))))
# We use a fake package, so temporarily disable checksumming # We use a fake package, so temporarily disable checksumming
with spack.config.override('config:checksum', False): with spack.config.override('config:checksum', False):
yield yield
tmpdir.join('opt').remove() tmpdir.join('opt').remove()
spack.store.store = real_store
# Also wipe out any cached prefix failure locks (associated with # Also wipe out any cached prefix failure locks (associated with
# the session-scoped mock archive). # the session-scoped mock archive).
@@ -687,31 +726,24 @@ def install_mockery_mutable_config(
     also need to modify configuration (and hence would want to use
     'mutable config'): 'install_mockery' does not support this.
     """
-    real_store = spack.store.store
-    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
+    monkeypatch.setattr(
+        spack.store, 'store', spack.store.Store(str(tmpdir.join('opt'))))

     # We use a fake package, so temporarily disable checksumming
     with spack.config.override('config:checksum', False):
         yield

     tmpdir.join('opt').remove()
-    spack.store.store = real_store


 @pytest.fixture()
-def mock_fetch(mock_archive):
+def mock_fetch(mock_archive, monkeypatch):
     """Fake the URL for a package so it downloads from a file."""
-    fetcher = FetchStrategyComposite()
-    fetcher.append(URLFetchStrategy(mock_archive.url))
-
-    @property
-    def fake_fn(self):
-        return fetcher
-
-    orig_fn = spack.package.PackageBase.fetcher
-    spack.package.PackageBase.fetcher = fake_fn
-    yield
-    spack.package.PackageBase.fetcher = orig_fn
+    mock_fetcher = FetchStrategyComposite()
+    mock_fetcher.append(URLFetchStrategy(mock_archive.url))
+
+    monkeypatch.setattr(
+        spack.package.PackageBase, 'fetcher', mock_fetcher)


 class MockLayout(object):
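Note: the recurring pattern in these fixture changes is replacing manual save/assign/restore around `yield` with `monkeypatch.setattr`. The manual version leaks the patched attribute if anything raises before the restore line; `monkeypatch` registers the undo with pytest so teardown always runs. A small sketch of the idiom (hypothetical `Service` class):

    import pytest

    class Service(object):
        timeout = 30

    def test_with_patch(monkeypatch):
        # patched for this test only; pytest restores the original
        # value at teardown even if the assertion fails
        monkeypatch.setattr(Service, 'timeout', 1)
        assert Service.timeout == 1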
@@ -737,6 +769,47 @@ def create_layout(root):
     yield create_layout


+class MockConfig(object):
+    def __init__(self, configuration, writer_key):
+        self._configuration = configuration
+        self.writer_key = writer_key
+
+    def configuration(self):
+        return self._configuration
+
+    def writer_configuration(self):
+        return self.configuration()[self.writer_key]
+
+
+class ConfigUpdate(object):
+    def __init__(self, root_for_conf, writer_mod, writer_key, monkeypatch):
+        self.root_for_conf = root_for_conf
+        self.writer_mod = writer_mod
+        self.writer_key = writer_key
+        self.monkeypatch = monkeypatch
+
+    def __call__(self, filename):
+        file = os.path.join(self.root_for_conf, filename + '.yaml')
+        with open(file) as f:
+            mock_config = MockConfig(syaml.load_config(f), self.writer_key)
+
+        self.monkeypatch.setattr(
+            spack.modules.common,
+            'configuration',
+            mock_config.configuration
+        )
+        self.monkeypatch.setattr(
+            self.writer_mod,
+            'configuration',
+            mock_config.writer_configuration
+        )
+        self.monkeypatch.setattr(
+            self.writer_mod,
+            'configuration_registry',
+            {}
+        )
+
+
 @pytest.fixture()
 def module_configuration(monkeypatch, request):
     """Reads the module configuration file from the mock ones prepared
@@ -753,34 +826,7 @@ def module_configuration(monkeypatch, request):
         spack.paths.test_path, 'data', 'modules', writer_key
     )

-    def _impl(filename):
-
-        file = os.path.join(root_for_conf, filename + '.yaml')
-        with open(file) as f:
-            configuration = syaml.load_config(f)
-
-        def mock_config_function():
-            return configuration
-
-        def writer_key_function():
-            return mock_config_function()[writer_key]
-
-        monkeypatch.setattr(
-            spack.modules.common,
-            'configuration',
-            mock_config_function
-        )
-        monkeypatch.setattr(
-            writer_mod,
-            'configuration',
-            writer_key_function
-        )
-        monkeypatch.setattr(
-            writer_mod,
-            'configuration_registry',
-            {}
-        )
-
-    return _impl
+    return ConfigUpdate(root_for_conf, writer_mod, writer_key, monkeypatch)


 @pytest.fixture()
@@ -23,7 +23,6 @@
 import llnl.util.lock as lk
 from llnl.util.tty.colify import colify
-from llnl.util.lang import fork_context

 import spack.repo
 import spack.store
@@ -319,7 +318,7 @@ def _check_merkleiness():

 def _check_db_sanity(database):
-    """Utiilty function to check db against install layout."""
+    """Utility function to check db against install layout."""
     pkg_in_layout = sorted(spack.store.layout.all_specs())
     actual = sorted(database.query())
@@ -517,14 +516,20 @@ def test_026_reindex_after_deprecate(mutable_database):
     _check_db_sanity(mutable_database)


-def test_030_db_sanity_from_another_process(mutable_database):
-    def read_and_modify():
+class ReadModify(object):
+    """Provide a function which can execute in a separate process that removes
+    a spec from the database.
+    """
+    def __call__(self):
         # check that other process can read DB
-        _check_db_sanity(mutable_database)
-        with mutable_database.write_transaction():
+        _check_db_sanity(spack.store.db)
+        with spack.store.db.write_transaction():
             _mock_remove('mpileaks ^zmpi')

-    p = fork_context.Process(target=read_and_modify, args=())
+
+def test_030_db_sanity_from_another_process(mutable_database):
+    spack_process = spack.subprocess_context.SpackTestProcess(ReadModify())
+    p = spack_process.create()
     p.start()
     p.join()
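Note the shift in how state reaches the child here: the old closure captured `mutable_database` from the enclosing test, while `ReadModify` reads the database back through the `spack.store.db` global, which `SpackTestProcess` sets up in the spawned child. The raw-multiprocessing shape of the idea, under an explicit spawn context (a sketch only; it assumes the child can rebuild the globals it reads):

    import multiprocessing

    class Check(object):
        def __call__(self):
            # read global state in the child rather than capturing it;
            # under spawn, everything handed to the child is pickled
            import spack.store
            spack.store.db.query()

    if __name__ == '__main__':
        ctx = multiprocessing.get_context('spawn')
        p = ctx.Process(target=Check())
        p.start()
        p.join()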
@@ -19,6 +19,11 @@
                                  _spack_configure_argsfile)


+def find_nothing(*args):
+    raise spack.repo.UnknownPackageError(
+        'Repo package access is disabled for test')
+
+
 def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
     # Get a basic concrete spec for the trivial install package.
     spec = Spec('trivial-install-test-package')
@@ -28,10 +33,6 @@ def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
     # Get the package
     pkg = spec.package

-    def find_nothing(*args):
-        raise spack.repo.UnknownPackageError(
-            'Repo package access is disabled for test')
-
     try:
         pkg.do_install()
@@ -83,23 +84,25 @@ def create(self):
         self.wrapped_stage.create()

     def __getattr__(self, attr):
+        if attr == 'wrapped_stage':
+            # This attribute may not be defined at some point during unpickling
+            raise AttributeError()
         return getattr(self.wrapped_stage, attr)


 def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
     spec = Spec('canfail').concretized()
     pkg = spack.repo.get(spec)
-    remove_prefix = spack.package.Package.remove_prefix
     instance_rm_prefix = pkg.remove_prefix

     try:
         pkg.succeed = False
-        spack.package.Package.remove_prefix = mock_remove_prefix
+        pkg.remove_prefix = mock_remove_prefix
         with pytest.raises(MockInstallError):
             pkg.do_install()
         assert os.path.isdir(pkg.prefix)
         rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
-        spack.package.Package.remove_prefix = rm_prefix_checker.remove_prefix
+        pkg.remove_prefix = rm_prefix_checker.remove_prefix

         # must clear failure markings for the package before re-installing it
         spack.store.db.clear_failure(spec, True)
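Note: the guard added to `__getattr__` above matters because unpickling creates the object without running `__init__` and then probes it (for example, for `__setstate__`) before `__dict__` is restored. At that moment `wrapped_stage` does not exist, so an unguarded delegating `__getattr__` calls itself forever. The failure mode in miniature:

    class BadProxy(object):
        def __init__(self, target):
            self.target = target

        def __getattr__(self, attr):
            # while unpickling, 'target' is not in __dict__ yet, so this
            # lookup re-enters __getattr__('target') -> RecursionError
            return getattr(self.target, attr)

Raising AttributeError for the delegate attribute itself breaks the cycle.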
@@ -113,7 +116,7 @@ def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
         assert pkg.installed

     finally:
-        spack.package.Package.remove_prefix = remove_prefix
+        pkg.remove_prefix = instance_rm_prefix


 def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
@@ -181,7 +184,8 @@ def test_flatten_deps(

 def test_installed_upstream_external(
-        tmpdir_factory, install_mockery, mock_fetch, gen_mock_layout):
+        tmpdir_factory, install_mockery, mock_fetch, gen_mock_layout,
+        monkeypatch):
     """Check that when a dependency package is recorded as installed in
     an upstream database that it is not reinstalled.
     """
@@ -194,29 +198,26 @@ def test_installed_upstream_external(
     dependency.concretize()
     prepared_db.add(dependency, upstream_layout)

-    try:
-        original_db = spack.store.db
-        downstream_db_root = str(
-            tmpdir_factory.mktemp('mock_downstream_db_root'))
-        spack.store.db = spack.database.Database(
-            downstream_db_root, upstream_dbs=[prepared_db])
-        dependent = spack.spec.Spec('externaltest')
-        dependent.concretize()
-
-        new_dependency = dependent['externaltool']
-        assert new_dependency.external
-        assert new_dependency.prefix == '/path/to/external_tool'
-
-        dependent.package.do_install()
-
-        assert not os.path.exists(new_dependency.prefix)
-        assert os.path.exists(dependent.prefix)
-    finally:
-        spack.store.db = original_db
+    downstream_db_root = str(
+        tmpdir_factory.mktemp('mock_downstream_db_root'))
+    db_for_test = spack.database.Database(
+        downstream_db_root, upstream_dbs=[prepared_db])
+    monkeypatch.setattr(spack.store, 'db', db_for_test)
+    dependent = spack.spec.Spec('externaltest')
+    dependent.concretize()
+
+    new_dependency = dependent['externaltool']
+    assert new_dependency.external
+    assert new_dependency.prefix == '/path/to/external_tool'
+
+    dependent.package.do_install()
+
+    assert not os.path.exists(new_dependency.prefix)
+    assert os.path.exists(dependent.prefix)


 def test_installed_upstream(tmpdir_factory, install_mockery, mock_fetch,
-                            gen_mock_layout):
+                            gen_mock_layout, monkeypatch):
     """Check that when a dependency package is recorded as installed in
     an upstream database that it is not reinstalled.
     """
@@ -229,26 +230,23 @@ def test_installed_upstream(tmpdir_factory, install_mockery, mock_fetch,
     dependency.concretize()
     prepared_db.add(dependency, upstream_layout)

-    try:
-        original_db = spack.store.db
-        downstream_db_root = str(
-            tmpdir_factory.mktemp('mock_downstream_db_root'))
-        spack.store.db = spack.database.Database(
-            downstream_db_root, upstream_dbs=[prepared_db])
-        dependent = spack.spec.Spec('dependent-install')
-        dependent.concretize()
-
-        new_dependency = dependent['dependency-install']
-        assert new_dependency.package.installed_upstream
-        assert (new_dependency.prefix ==
-                upstream_layout.path_for_spec(dependency))
-
-        dependent.package.do_install()
-
-        assert not os.path.exists(new_dependency.prefix)
-        assert os.path.exists(dependent.prefix)
-    finally:
-        spack.store.db = original_db
+    downstream_db_root = str(
+        tmpdir_factory.mktemp('mock_downstream_db_root'))
+    db_for_test = spack.database.Database(
+        downstream_db_root, upstream_dbs=[prepared_db])
+    monkeypatch.setattr(spack.store, 'db', db_for_test)
+    dependent = spack.spec.Spec('dependent-install')
+    dependent.concretize()
+
+    new_dependency = dependent['dependency-install']
+    assert new_dependency.package.installed_upstream
+    assert (new_dependency.prefix ==
+            upstream_layout.path_for_spec(dependency))
+
+    dependent.package.do_install()
+
+    assert not os.path.exists(new_dependency.prefix)
+    assert os.path.exists(dependent.prefix)


 @pytest.mark.disable_clean_stage_check
@@ -626,7 +626,7 @@ def _add(_compilers):
     # Preclude any meaningful side-effects
     monkeypatch.setattr(spack.package.PackageBase, 'unit_test_check', _true)
     monkeypatch.setattr(inst.PackageInstaller, '_setup_install_dir', _noop)
-    monkeypatch.setattr(spack.build_environment, 'fork', _noop)
+    monkeypatch.setattr(spack.build_environment, 'start_build_process', _noop)
     monkeypatch.setattr(spack.database.Database, 'add', _noop)
     monkeypatch.setattr(spack.compilers, 'add_compilers_to_config', _add)
@@ -53,14 +53,13 @@
 import glob
 import getpass
 from contextlib import contextmanager
-from multiprocessing import Queue
+from multiprocessing import Process, Queue

 import pytest

 import llnl.util.lock as lk
 import llnl.util.multiproc as mp
 from llnl.util.filesystem import touch
-from llnl.util.lang import fork_context


 #
@@ -217,7 +216,7 @@ def local_multiproc_test(*functions, **kwargs):
     b = mp.Barrier(len(functions), timeout=barrier_timeout)

     args = (b,) + tuple(kwargs.get('extra_args', ()))
-    procs = [fork_context.Process(target=f, args=args, name=f.__name__)
+    procs = [Process(target=f, args=args, name=f.__name__)
              for f in functions]

     for p in procs:
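Note: with the lock-test snippets converted to picklable callables below, the `fork_context` shim becomes unnecessary; a plain `multiprocessing.Process` now behaves the same whether the platform default is fork or spawn. The basic requirement, sketched:

    from multiprocessing import Process

    def greet(name):
        print('hello from', name)

    if __name__ == '__main__':
        # works under fork and spawn alike, because 'greet' is a
        # module-level (hence picklable) function
        p = Process(target=greet, args=('child',))
        p.start()
        p.join()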
@@ -278,42 +277,74 @@ def wait(self):
 #
 # Process snippets below can be composed into tests.
 #
-def acquire_write(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class AcquireWrite(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         lock.acquire_write()  # grab exclusive lock
         barrier.wait()
         barrier.wait()  # hold the lock until timeout in other procs.
-    return fn


-def acquire_read(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class AcquireRead(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         lock.acquire_read()  # grab shared lock
         barrier.wait()
         barrier.wait()  # hold the lock until timeout in other procs.
-    return fn


-def timeout_write(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class TimeoutWrite(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         barrier.wait()  # wait for lock acquire in first process
         with pytest.raises(lk.LockTimeoutError):
             lock.acquire_write(lock_fail_timeout)
         barrier.wait()
-    return fn


-def timeout_read(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class TimeoutRead(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         barrier.wait()  # wait for lock acquire in first process
         with pytest.raises(lk.LockTimeoutError):
             lock.acquire_read(lock_fail_timeout)
         barrier.wait()
-    return fn


 #
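Note: the `__name__` property on each of these classes exists because `local_multiproc_test` builds processes with `Process(..., name=f.__name__)`. Ordinary instances have no `__name__`, so the classes expose one to stay drop-in compatible with the functions they replace:

    class Snippet(object):
        @property
        def __name__(self):
            # mimic a function's __name__ for Process(name=f.__name__)
            return self.__class__.__name__

    assert Snippet().__name__ == 'Snippet'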
@@ -322,57 +353,57 @@ def fn(barrier):
 #
 def test_write_lock_timeout_on_write(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_ranges(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_write(lock_path, 0, 1))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 0, 1))


 def test_write_lock_timeout_on_write_ranges_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 64),
-        acquire_write(lock_path, 65, 1),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 63, 1))
+        AcquireWrite(lock_path, 0, 64),
+        AcquireWrite(lock_path, 65, 1),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 63, 1))


 def test_write_lock_timeout_on_write_ranges_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 1, 1),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 1, 1),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_ranges_4(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 1, 1),
-        acquire_write(lock_path, 2, 456),
-        acquire_write(lock_path, 500, 64),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 1, 1),
+        AcquireWrite(lock_path, 2, 456),
+        AcquireWrite(lock_path, 500, 64),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 #
@@ -381,46 +412,46 @@ def test_write_lock_timeout_on_write_ranges_4(lock_path):
 #
 def test_read_lock_timeout_on_write(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_ranges(lock_path):
     """small write lock, read whole file."""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_ranges_2(lock_path):
     """small write lock, small read lock"""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_read(lock_path, 0, 1))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutRead(lock_path, 0, 1))


 def test_read_lock_timeout_on_write_ranges_3(lock_path):
     """two write locks, overlapping read locks"""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 64, 128),
-        timeout_read(lock_path, 0, 1),
-        timeout_read(lock_path, 128, 256))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 64, 128),
+        TimeoutRead(lock_path, 0, 1),
+        TimeoutRead(lock_path, 128, 256))


 #
@@ -428,58 +459,58 @@ def test_read_lock_timeout_on_write_ranges_3(lock_path):
 #
 def test_write_lock_timeout_on_read(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_3(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        timeout_write(lock_path))
+        AcquireRead(lock_path, 0, 1),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_ranges_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        timeout_write(lock_path, 0, 1))
+        AcquireRead(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 0, 1))


 def test_write_lock_timeout_on_read_ranges_3(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        acquire_read(lock_path, 10, 1),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 10, 1))
+        AcquireRead(lock_path, 0, 1),
+        AcquireRead(lock_path, 10, 1),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 10, 1))


 def test_write_lock_timeout_on_read_ranges_4(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 64),
-        timeout_write(lock_path, 10, 1),
-        timeout_write(lock_path, 32, 1))
+        AcquireRead(lock_path, 0, 64),
+        TimeoutWrite(lock_path, 10, 1),
+        TimeoutWrite(lock_path, 32, 1))


 def test_write_lock_timeout_on_read_ranges_5(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 64, 128),
-        timeout_write(lock_path, 65, 1),
-        timeout_write(lock_path, 127, 1),
-        timeout_write(lock_path, 90, 10))
+        AcquireRead(lock_path, 64, 128),
+        TimeoutWrite(lock_path, 65, 1),
+        TimeoutWrite(lock_path, 127, 1),
+        TimeoutWrite(lock_path, 90, 10))


 #
@@ -487,67 +518,67 @@ def test_write_lock_timeout_on_read_ranges_5(lock_path):
 #
 def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 10),
-        acquire_read(lock_path, 0.5, 10),
-        timeout_write(lock_path, 5, 5))
+        AcquireRead(lock_path, 0, 10),
+        AcquireRead(lock_path, 0.5, 10),
+        TimeoutWrite(lock_path, 5, 5))


 def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 10),
-        acquire_read(lock_path, 5, 15),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 11, 3),
-        timeout_write(lock_path, 7, 1))
+        AcquireRead(lock_path, 0, 10),
+        AcquireRead(lock_path, 5, 15),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 11, 3),
+        TimeoutWrite(lock_path, 7, 1))


 def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 5),
-        acquire_read(lock_path, 5, 5),
-        acquire_read(lock_path, 10, 5),
-        timeout_write(lock_path, 0, 15))
+        AcquireRead(lock_path, 0, 5),
+        AcquireRead(lock_path, 5, 5),
+        AcquireRead(lock_path, 10, 5),
+        TimeoutWrite(lock_path, 0, 15))


 def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 5),
-        acquire_read(lock_path, 5, 5),
-        acquire_read(lock_path, 10, 5),
-        timeout_write(lock_path, 3, 10),
-        timeout_write(lock_path, 5, 1))
+        AcquireRead(lock_path, 0, 5),
+        AcquireRead(lock_path, 5, 5),
+        AcquireRead(lock_path, 10, 5),
+        TimeoutWrite(lock_path, 3, 10),
+        TimeoutWrite(lock_path, 5, 1))


 @pytest.mark.skipif(os.getuid() == 0, reason='user is root')
@@ -651,13 +682,12 @@ def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
         lock.acquire_write()


-#
-# Longer test case that ensures locks are reusable. Ordering is
-# enforced by barriers throughout -- steps are shown with numbers.
-#
-def test_complex_acquire_and_release_chain(lock_path):
-    def p1(barrier):
-        lock = lk.Lock(lock_path)
+class ComplexAcquireAndRelease(object):
+    def __init__(self, lock_path):
+        self.lock_path = lock_path
+
+    def p1(self, barrier):
+        lock = lk.Lock(self.lock_path)

         lock.acquire_write()
         barrier.wait()  # ---------------------------------------- 1
@@ -697,8 +727,8 @@ def p1(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    def p2(barrier):
-        lock = lk.Lock(lock_path)
+    def p2(self, barrier):
+        lock = lk.Lock(self.lock_path)

         # p1 acquires write
         barrier.wait()  # ---------------------------------------- 1
@@ -737,8 +767,8 @@ def p2(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    def p3(barrier):
-        lock = lk.Lock(lock_path)
+    def p3(self, barrier):
+        lock = lk.Lock(self.lock_path)

         # p1 acquires write
         barrier.wait()  # ---------------------------------------- 1
@@ -777,7 +807,16 @@ def p3(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    multiproc_test(p1, p2, p3)
+
+#
+# Longer test case that ensures locks are reusable. Ordering is
+# enforced by barriers throughout -- steps are shown with numbers.
+#
+def test_complex_acquire_and_release_chain(lock_path):
+    test_chain = ComplexAcquireAndRelease(lock_path)
+    multiproc_test(test_chain.p1,
+                   test_chain.p2,
+                   test_chain.p3)


 class AssertLock(lk.Lock):
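Note: passing `test_chain.p1` and friends to `multiproc_test` works under spawn because Python 3 pickles a bound method as its instance plus the method name, so `lock_path` travels with it. A quick check of that behavior:

    import pickle

    class Worker(object):
        def __init__(self, path):
            self.path = path

        def run(self):
            return self.path

    run = pickle.loads(pickle.dumps(Worker('/tmp/lockfile').run))
    assert run() == '/tmp/lockfile'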
@@ -1146,24 +1185,26 @@ def read():
     assert vals['read'] == 1


-def test_lock_debug_output(lock_path):
-    host = socket.getfqdn()
+class LockDebugOutput(object):
+    def __init__(self, lock_path):
+        self.lock_path = lock_path
+        self.host = socket.getfqdn()

-    def p1(barrier, q1, q2):
+    def p1(self, barrier, q1, q2):
         # exchange pids
         p1_pid = os.getpid()
         q1.put(p1_pid)
         p2_pid = q2.get()

         # set up lock
-        lock = lk.Lock(lock_path, debug=True)
+        lock = lk.Lock(self.lock_path, debug=True)

         with lk.WriteTransaction(lock):
             # p1 takes write lock and writes pid/host to file
             barrier.wait()  # ------------------------------------ 1

         assert lock.pid == p1_pid
-        assert lock.host == host
+        assert lock.host == self.host

         # wait for p2 to verify contents of file
         barrier.wait()  # ---------------------------------------- 2
@@ -1174,21 +1215,21 @@ def p1(barrier, q1, q2):
         # verify pid/host info again
         with lk.ReadTransaction(lock):
             assert lock.old_pid == p1_pid
-            assert lock.old_host == host
+            assert lock.old_host == self.host
             assert lock.pid == p2_pid
-            assert lock.host == host
+            assert lock.host == self.host

         barrier.wait()  # ---------------------------------------- 4

-    def p2(barrier, q1, q2):
+    def p2(self, barrier, q1, q2):
         # exchange pids
         p2_pid = os.getpid()
         p1_pid = q1.get()
         q2.put(p2_pid)

         # set up lock
-        lock = lk.Lock(lock_path, debug=True)
+        lock = lk.Lock(self.lock_path, debug=True)

         # p1 takes write lock and writes pid/host to file
         barrier.wait()  # ---------------------------------------- 1
@@ -1196,25 +1237,28 @@ def p2(barrier, q1, q2):

         # verify that p1 wrote information to lock file
         with lk.ReadTransaction(lock):
             assert lock.pid == p1_pid
-            assert lock.host == host
+            assert lock.host == self.host

         barrier.wait()  # ---------------------------------------- 2

         # take a write lock on the file and verify pid/host info
         with lk.WriteTransaction(lock):
             assert lock.old_pid == p1_pid
-            assert lock.old_host == host
+            assert lock.old_host == self.host
             assert lock.pid == p2_pid
-            assert lock.host == host
+            assert lock.host == self.host

             barrier.wait()  # ------------------------------------ 3

         # wait for p1 to verify pid/host info
         barrier.wait()  # ---------------------------------------- 4

+
+def test_lock_debug_output(lock_path):
+    test_debug = LockDebugOutput(lock_path)
     q1, q2 = Queue(), Queue()
-    local_multiproc_test(p2, p1, extra_args=(q1, q2))
+    local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))


 def test_lock_with_no_parent_directory(tmpdir):
@@ -396,6 +396,11 @@ def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
 def test_foreground_background_output(
         test_fn, capfd, termios_on_or_off, tmpdir):
     """Tests hitting 'v' toggles output, and that force_echo works."""
+    if (sys.version_info >= (3, 8) and sys.platform == 'darwin'
+            and termios_on_or_off == no_termios):
+        return
+
     shell = PseudoShell(test_fn, synchronized_logger)
     log_path = str(tmpdir.join("log.txt"))
@@ -21,29 +21,36 @@
 from spack.util.executable import which

+too_long = sbang.shebang_limit + 1
+
 short_line = "#!/this/is/short/bin/bash\n"
-long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
-lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"
+long_line = "#!/this/" + ('x' * too_long) + "/is/long\n"
+lua_line = "#!/this/" + ('x' * too_long) + "/is/lua\n"
 lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
-lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
-node_line = "#!/this/" + ('x' * 200) + "/is/node\n"
+lua_line_patched = "--!/this/" + ('x' * too_long) + "/is/lua\n"
+node_line = "#!/this/" + ('x' * too_long) + "/is/node\n"
 node_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
-node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n"
-php_line = "#!/this/" + ('x' * 200) + "/is/php\n"
+node_line_patched = "//!/this/" + ('x' * too_long) + "/is/node\n"
+php_line = "#!/this/" + ('x' * too_long) + "/is/php\n"
 php_in_text = ("line\n") * 100 + "php\n" + ("line\n" * 100)
-php_line_patched = "<?php #!/this/" + ('x' * 200) + "/is/php\n"
+php_line_patched = "<?php #!/this/" + ('x' * too_long) + "/is/php\n"
 php_line_patched2 = "?>\n"
-sbang_line = '#!/bin/sh %s/bin/sbang\n' % spack.store.layout.root
 last_line = "last!\n"

+
+@pytest.fixture
+def sbang_line():
+    yield '#!/bin/sh %s/bin/sbang\n' % spack.store.layout.root
+

 class ScriptDirectory(object):
     """Directory full of test scripts to run sbang instrumentation on."""
-    def __init__(self):
+    def __init__(self, sbang_line):
         self.tempdir = tempfile.mkdtemp()
         self.directory = os.path.join(self.tempdir, 'dir')
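Note: deriving the test constants from `sbang.shebang_limit` keeps them one character over the limit on every platform, instead of assuming that 200 x's are always too long. A sketch of what a platform-dependent limit can look like (the exact values here are assumptions for illustration, not Spack's code):

    import sys

    # traditional Linux kernel shebang limit vs. a larger macOS value
    shebang_limit = 512 if sys.platform == 'darwin' else 127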
@@ -117,13 +124,13 @@ def destroy(self):

 @pytest.fixture
-def script_dir():
-    sdir = ScriptDirectory()
+def script_dir(sbang_line):
+    sdir = ScriptDirectory(sbang_line)
     yield sdir
     sdir.destroy()


-def test_shebang_handling(script_dir):
+def test_shebang_handling(script_dir, sbang_line):
     assert sbang.shebang_too_long(script_dir.lua_shebang)
     assert sbang.shebang_too_long(script_dir.long_shebang)
@@ -169,13 +176,13 @@ def test_shebang_handling(script_dir):
         assert f.readline() == last_line


-def test_shebang_handles_non_writable_files(script_dir):
+def test_shebang_handles_non_writable_files(script_dir, sbang_line):
     # make a file non-writable
     st = os.stat(script_dir.long_shebang)
     not_writable_mode = st.st_mode & ~stat.S_IWRITE
     os.chmod(script_dir.long_shebang, not_writable_mode)

-    test_shebang_handling(script_dir)
+    test_shebang_handling(script_dir, sbang_line)

     st = os.stat(script_dir.long_shebang)
     assert oct(not_writable_mode) == oct(st.st_mode)
@@ -8,6 +8,27 @@
 import functools


+class Delegate(object):
+    def __init__(self, name, container):
+        self.name = name
+        self.container = container
+
+    def __call__(self, *args, **kwargs):
+        return [getattr(item, self.name)(*args, **kwargs)
+                for item in self.container]
+
+
+class Composite(list):
+    def __init__(self, fns_to_delegate):
+        self.fns_to_delegate = fns_to_delegate
+
+    def __getattr__(self, name):
+        if name != 'fns_to_delegate' and name in self.fns_to_delegate:
+            return Delegate(name, self)
+        else:
+            return self.__getattribute__(name)
+
+
 def composite(interface=None, method_list=None, container=list):
     """Decorator implementing the GoF composite pattern.
@@ -49,3 +49,9 @@ def join(self, string):
             Prefix: the newly created installation prefix
         """
         return Prefix(os.path.join(self, string))
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def __setstate__(self, d):
+        self.__dict__.update(d)
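Note: `Prefix` subclasses `str`, and spelling out `__getstate__`/`__setstate__` makes its pickling explicit so prefixes survive the trip into a spawned build process. A round-trip check (assuming the usual `spack.util.prefix` import path):

    import os
    import pickle

    from spack.util.prefix import Prefix

    p = Prefix('/opt/spack/opt')
    q = pickle.loads(pickle.dumps(p))
    assert q == p and isinstance(q, Prefix)
    assert q.join('bin') == os.path.join(p, 'bin')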