diff --git a/.github/workflows/macos_unit_tests.yaml b/.github/workflows/macos_unit_tests.yaml
index d8e5e631b6..22baac3608 100644
--- a/.github/workflows/macos_unit_tests.yaml
+++ b/.github/workflows/macos_unit_tests.yaml
@@ -12,13 +12,16 @@ on:
 jobs:
   build:
     runs-on: macos-latest
+    strategy:
+      matrix:
+        python-version: [3.8, 3.9]
     steps:
     - uses: actions/checkout@v2
       with:
         fetch-depth: 0
     - uses: actions/setup-python@v2
       with:
-        python-version: 3.9
+        python-version: ${{ matrix.python-version }}
     - name: Install Python packages
       run: |
         pip install --upgrade pip six setuptools
diff --git a/bin/spack b/bin/spack
index 9d8e0419ad..7e2e7e06de 100755
--- a/bin/spack
+++ b/bin/spack
@@ -59,6 +59,8 @@ if 'ruamel.yaml' in sys.modules:
 if 'ruamel' in sys.modules:
     del sys.modules['ruamel']
 
-# Once we've set up the system path, run the spack main method
 import spack.main  # noqa
-sys.exit(spack.main.main())
+
+# Once we've set up the system path, run the spack main method
+if __name__ == "__main__":
+    sys.exit(spack.main.main())
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 0bcbebba72..b9b034d4d4 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -568,6 +568,12 @@ def instance(self):
         return self._instance
 
     def __getattr__(self, name):
+        # When unpickling Singleton objects, the 'instance' attribute may be
+        # requested but not yet set. The final 'getattr' line here requires
+        # 'instance'/'_instance' to be defined or it will enter an infinite
+        # loop, so protect against that here.
+        if name in ['_instance', 'instance']:
+            raise AttributeError()
         return getattr(self.instance, name)
 
     def __getitem__(self, name):
@@ -596,6 +602,8 @@ def __init__(self, ref_function):
         self.ref_function = ref_function
 
     def __getattr__(self, name):
+        if name == 'ref_function':
+            raise AttributeError()
         return getattr(self.ref_function(), name)
 
     def __getitem__(self, name):
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index 97fba0592c..658de5c596 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -21,7 +21,6 @@
 from six import StringIO
 
 import llnl.util.tty as tty
-from llnl.util.lang import fork_context
 
 try:
     import termios
@@ -237,6 +236,8 @@ def __exit__(self, exc_type, exception, traceback):
         """If termios was available, restore old settings."""
         if self.old_cfg:
             self._restore_default_terminal_settings()
+            if sys.version_info >= (3,):
+                atexit.unregister(self._restore_default_terminal_settings)
 
         # restore SIGSTP and SIGCONT handlers
         if self.old_handlers:
@@ -288,6 +289,109 @@ def _file_descriptors_work(*streams):
     return False
 
 
+class FileWrapper(object):
+    """Represents a file. Can be an open stream, a path to a file (not opened
+    yet), or neither. When unwrapped, it returns an open file (or file-like)
+    object.
+    """
+    def __init__(self, file_like):
+        # This records whether the file-like object returned by "unwrap" is
+        # purely in-memory. In that case a subprocess will need to explicitly
+        # transmit the contents to the parent.
+        self.write_in_parent = False
+
+        self.file_like = file_like
+
+        if isinstance(file_like, string_types):
+            self.open = True
+        elif _file_descriptors_work(file_like):
+            self.open = False
+        else:
+            self.file_like = None
+            self.open = True
+            self.write_in_parent = True
+
+        self.file = None
+
+    def unwrap(self):
+        if self.open:
+            if self.file_like:
+                self.file = open(self.file_like, 'w')
+            else:
+                self.file = StringIO()
+            return self.file
+        else:
+            # We were handed an already-open file object. In this case we also
+            # will not actually close the object when requested to.
+            return self.file_like
+
+    def close(self):
+        if self.file:
+            self.file.close()
+
+
+class MultiProcessFd(object):
+    """Stores a file descriptor and can be passed as an argument to a
+    function run with ``multiprocessing.Process``, such that the file
+    descriptor is available in the subprocess."""
+    def __init__(self, fd):
+        self._connection = None
+        self._fd = None
+        if sys.version_info >= (3, 8):
+            self._connection = multiprocessing.connection.Connection(fd)
+        else:
+            self._fd = fd
+
+    @property
+    def fd(self):
+        if self._connection:
+            return self._connection._handle
+        else:
+            return self._fd
+
+    def close(self):
+        if self._connection:
+            self._connection.close()
+        else:
+            os.close(self._fd)
+
+
+def close_connection_and_file(multiprocess_fd, file):
+    # MultiProcessFd is intended to transmit a file descriptor to a child
+    # process; the descriptor is then opened as a Python file object (using
+    # fdopen). In Python >= 3.8, MultiProcessFd encapsulates a
+    # multiprocessing.connection.Connection, which closes the descriptor
+    # when it is deleted and prints a warning about duplicate closure if it
+    # is not explicitly closed. In Python < 3.8, MultiProcessFd encapsulates
+    # a plain descriptor, and closing it here conflicts with closure of the
+    # file object. Therefore this function needs to choose whether to close
+    # the file or the Connection.
+    if sys.version_info >= (3, 8):
+        multiprocess_fd.close()
+    else:
+        file.close()
+
+
+@contextmanager
+def replace_environment(env):
+    """Replace the current environment (`os.environ`) with `env`.
+
+    If `env` is empty (or None), this unsets all current environment
+    variables.
+    """
+    env = env or {}
+    old_env = os.environ.copy()
+    try:
+        os.environ.clear()
+        for name, val in env.items():
+            os.environ[name] = val
+        yield
+    finally:
+        os.environ.clear()
+        for name, val in old_env.items():
+            os.environ[name] = val
+
+
 class log_output(object):
     """Context manager that logs its output to a file.
 
@@ -324,7 +428,8 @@ class log_output(object):
     work within test frameworks like nose and pytest.
     """
 
-    def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
+    def __init__(self, file_like=None, echo=False, debug=0, buffer=False,
+                 env=None):
         """Create a new output log context manager.
 
         Args:
@@ -352,6 +457,7 @@ def __init__(self, file_like=None, echo=False, debug=0, buffer=False):
         self.echo = echo
         self.debug = debug
         self.buffer = buffer
+        self.env = env  # the environment to use for _writer_daemon
 
         self._active = False  # used to prevent re-entry
@@ -393,18 +499,7 @@ def __enter__(self):
                 "file argument must be set by either __init__ or __call__")
 
         # set up a stream for the daemon to write to
-        self.close_log_in_parent = True
-        self.write_log_in_parent = False
-        if isinstance(self.file_like, string_types):
-            self.log_file = open(self.file_like, 'w')
-
-        elif _file_descriptors_work(self.file_like):
-            self.log_file = self.file_like
-            self.close_log_in_parent = False
-
-        else:
-            self.log_file = StringIO()
-            self.write_log_in_parent = True
+        self.log_file = FileWrapper(self.file_like)
 
         # record parent color settings before redirecting.  We do this
         # because color output depends on whether the *original* stdout
@@ -419,6 +514,8 @@ def __enter__(self):
 
         # OS-level pipe for redirecting output to logger
         read_fd, write_fd = os.pipe()
+
+        read_multiprocess_fd = MultiProcessFd(read_fd)
 
         # Multiprocessing pipe for communication back from the daemon
         # Currently only used to save echo value between uses
         self.parent_pipe, child_pipe = multiprocessing.Pipe()
@@ -427,24 +524,28 @@ def __enter__(self):
         try:
             # need to pass this b/c multiprocessing closes stdin in child.
            try:
-                input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
-            except BaseException:
-                input_stream = None  # just don't forward input if this fails
-
-            self.process = fork_context.Process(
-                target=_writer_daemon,
-                args=(
-                    input_stream, read_fd, write_fd, self.echo, self.log_file,
-                    child_pipe
+                input_multiprocess_fd = MultiProcessFd(
+                    os.dup(sys.stdin.fileno())
                 )
-            )
-            self.process.daemon = True  # must set before start()
-            self.process.start()
-            os.close(read_fd)  # close in the parent process
+            except BaseException:
+                # just don't forward input if this fails
+                input_multiprocess_fd = None
+
+            with replace_environment(self.env):
+                self.process = multiprocessing.Process(
+                    target=_writer_daemon,
+                    args=(
+                        input_multiprocess_fd, read_multiprocess_fd, write_fd,
+                        self.echo, self.log_file, child_pipe
+                    )
+                )
+                self.process.daemon = True  # must set before start()
+                self.process.start()
 
         finally:
-            if input_stream:
-                input_stream.close()
+            if input_multiprocess_fd:
+                input_multiprocess_fd.close()
+            read_multiprocess_fd.close()
 
         # Flush immediately before redirecting so that anything buffered
         # goes to the original stream
@@ -515,18 +616,21 @@ def __exit__(self, exc_type, exc_val, exc_tb):
             sys.stderr = self._saved_stderr
 
         # print log contents in parent if needed.
-        if self.write_log_in_parent:
+        if self.log_file.write_in_parent:
             string = self.parent_pipe.recv()
             self.file_like.write(string)
 
-        if self.close_log_in_parent:
-            self.log_file.close()
-
         # recover and store echo settings from the child before it dies
-        self.echo = self.parent_pipe.recv()
+        try:
+            self.echo = self.parent_pipe.recv()
+        except EOFError:
+            # This may occur if some exception prematurely terminates the
+            # _writer_daemon. An exception will have already been generated.
+            pass
 
-        # join the daemon process. The daemon will quit automatically
-        # when the write pipe is closed; we just wait for it here.
+        # now that the write pipe is closed (in this __exit__, when we restore
+        # stdout with dup2), the logger daemon process loop will terminate. We
+        # wait for that here.
         self.process.join()
 
         # restore old color and debug settings
@@ -555,7 +659,8 @@ def force_echo(self):
             sys.stdout.flush()
 
 
-def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
+def _writer_daemon(stdin_multiprocess_fd, read_multiprocess_fd, write_fd, echo,
+                   log_file_wrapper, control_pipe):
     """Daemon used by ``log_output`` to write to a log file and to ``stdout``.
 
     The daemon receives output from the parent process and writes it both
@@ -592,26 +697,39 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
     ``StringIO`` in the parent. This is mainly for testing.
 
     Arguments:
-        stdin (stream): input from the terminal
-        read_fd (int): pipe for reading from parent's redirected stdout
-        write_fd (int): parent's end of the pipe will write to (will be
-            immediately closed by the writer daemon)
+        stdin_multiprocess_fd (MultiProcessFd): input from the terminal
+        read_multiprocess_fd (MultiProcessFd): pipe for reading from
+            parent's redirected stdout
         echo (bool): initial echo setting -- controlled by user and
             preserved across multiple writer daemons
-        log_file (file-like): file to log all output
+        log_file_wrapper (FileWrapper): file to log all output
         control_pipe (Pipe): multiprocessing pipe on which to send control
             information to the parent
 
     """
+    # If this process was forked, then it will inherit file descriptors from
+    # the parent process. This process depends on closing all instances of
+    # write_fd to terminate the reading loop, so we close the file descriptor
+    # here. Fork is the process start method everywhere except on Windows
+    # and on Mac OS with Python >= 3.8.
+    if sys.version_info < (3, 8) or sys.platform != 'darwin':
+        os.close(write_fd)
+
     # Use line buffering (3rd param = 1) since Python 3 has a bug
     # that prevents unbuffered text I/O.
-    in_pipe = os.fdopen(read_fd, 'r', 1)
-    os.close(write_fd)
+    in_pipe = os.fdopen(read_multiprocess_fd.fd, 'r', 1)
+
+    if stdin_multiprocess_fd:
+        stdin = os.fdopen(stdin_multiprocess_fd.fd)
+    else:
+        stdin = None
 
     # list of streams to select from
     istreams = [in_pipe, stdin] if stdin else [in_pipe]
     force_echo = False      # parent can force echo for certain output
 
+    log_file = log_file_wrapper.unwrap()
+
     try:
         with keyboard_input(stdin) as kb:
             while True:
@@ -672,10 +790,13 @@ def _writer_daemon(stdin, read_fd, write_fd, echo, log_file, control_pipe):
         # send written data back to parent if we used a StringIO
         if isinstance(log_file, StringIO):
             control_pipe.send(log_file.getvalue())
-        log_file.close()
+        log_file_wrapper.close()
+        close_connection_and_file(read_multiprocess_fd, in_pipe)
+        if stdin_multiprocess_fd:
+            close_connection_and_file(stdin_multiprocess_fd, stdin)
 
-    # send echo value back to the parent so it can be preserved.
-    control_pipe.send(echo)
+        # send echo value back to the parent so it can be preserved.
+        control_pipe.send(echo)
 
 
 def _retry(function):
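A note on the `MultiProcessFd` class introduced above: a bare integer descriptor is only meaningful to a child created with fork. Under the "spawn" start method the child is a brand-new interpreter, so the descriptor must be duplicated into it explicitly; wrapping it in a `multiprocessing.connection.Connection` lets multiprocessing's pickling machinery perform that duplication when the object is passed as a `Process` argument. The standalone sketch below illustrates the mechanism — it is not part of the patch, assumes a POSIX platform and Python 3, and uses made-up names:

```python
import multiprocessing
import multiprocessing.connection
import os


def child(conn):
    # The descriptor inside 'conn' was duplicated into this process by
    # multiprocessing's reduction machinery when 'conn' was pickled.
    print("child read:", os.read(conn.fileno(), 1024).decode())
    conn.close()


if __name__ == "__main__":
    ctx = multiprocessing.get_context("spawn")
    read_fd, write_fd = os.pipe()
    conn = multiprocessing.connection.Connection(read_fd)
    p = ctx.Process(target=child, args=(conn,))
    p.start()
    os.write(write_fd, b"hello from the parent")
    os.close(write_fd)
    p.join()
    conn.close()
```

The same duplication is what allows `_writer_daemon` to receive `read_multiprocess_fd` even when the child shares no memory with the parent.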
""" - self.proc = fork_context.Process( + self.proc = multiprocessing.Process( target=PseudoShell._set_up_and_run_controller_function, args=(self.controller_function, self.minion_function, self.controller_timeout, self.sleep_time), diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py index be6b2cc1ab..a1501034d4 100644 --- a/lib/spack/spack/build_environment.py +++ b/lib/spack/spack/build_environment.py @@ -45,7 +45,8 @@ import llnl.util.tty as tty from llnl.util.tty.color import cescape, colorize from llnl.util.filesystem import mkdirp, install, install_tree -from llnl.util.lang import dedupe, fork_context +from llnl.util.lang import dedupe +from llnl.util.tty.log import MultiProcessFd import spack.build_systems.cmake import spack.build_systems.meson @@ -54,6 +55,7 @@ import spack.paths import spack.schema.environment import spack.store +import spack.subprocess_context import spack.architecture as arch from spack.util.string import plural from spack.util.environment import ( @@ -803,13 +805,72 @@ def modifications_from_dependencies(spec, context): return env -def fork(pkg, function, dirty, fake): - """Fork a child process to do part of a spack build. +def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe, + input_multiprocess_fd): + + try: + # We are in the child process. Python sets sys.stdin to + # open(os.devnull) to prevent our process and its parent from + # simultaneously reading from the original stdin. But, we assume + # that the parent process is not going to read from it till we + # are done with the child, so we undo Python's precaution. + if input_multiprocess_fd is not None: + sys.stdin = os.fdopen(input_multiprocess_fd.fd) + + pkg = serialized_pkg.restore() + + if not kwargs.get('fake', False): + kwargs['unmodified_env'] = os.environ.copy() + setup_package(pkg, dirty=kwargs.get('dirty', False)) + return_value = function(pkg, kwargs) + child_pipe.send(return_value) + + except StopPhase as e: + # Do not create a full ChildError from this, it's not an error + # it's a control statement. + child_pipe.send(e) + except BaseException: + # catch ANYTHING that goes wrong in the child process + exc_type, exc, tb = sys.exc_info() + + # Need to unwind the traceback in the child because traceback + # objects can't be sent to the parent. + tb_string = traceback.format_exc() + + # build up some context from the offending package so we can + # show that, too. + package_context = get_package_context(tb) + + build_log = None + try: + if hasattr(pkg, 'log_path'): + build_log = pkg.log_path + except NameError: + # 'pkg' is not defined yet + pass + + # make a pickleable exception to send to parent. + msg = "%s: %s" % (exc_type.__name__, str(exc)) + + ce = ChildError(msg, + exc_type.__module__, + exc_type.__name__, + tb_string, build_log, package_context) + child_pipe.send(ce) + + finally: + child_pipe.close() + if input_multiprocess_fd is not None: + input_multiprocess_fd.close() + + +def start_build_process(pkg, function, kwargs): + """Create a child process to do part of a spack build. Args: pkg (PackageBase): package whose environment we should set up the - forked process for. + child process for. function (callable): argless function to run in the child process. 
         dirty (bool): If True, do NOT clean the environment before
@@ -820,9 +881,9 @@ def fork(pkg, function, dirty, fake):
 
         def child_fun():
             # do stuff
-        build_env.fork(pkg, child_fun)
+        build_env.start_build_process(pkg, child_fun)
 
-    Forked processes are run with the build environment set up by
+    The child process is run with the build environment set up by
     spack.build_environment. This allows package authors to have full
     control over the environment, etc. without affecting other builds
     that might be executed in the same spack call.
@@ -830,64 +891,36 @@ def child_fun():
 
     If something goes wrong, the child process catches the error and
     passes it to the parent wrapped in a ChildError. The parent is expected
     to handle (or re-raise) the ChildError.
+
+    This uses `multiprocessing.Process` to create the child process. The
+    mechanism used to create the process differs on different operating
+    systems and for different versions of Python. In some cases "fork"
+    is used (i.e. the "fork" system call) and in some cases it starts an
+    entirely new Python interpreter process (in the docs this is referred
+    to as the "spawn" start method). Breaking it down by OS:
+
+    - Linux always uses fork.
+    - Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
+    - Windows always uses the "spawn" start method.
+
+    For more information on `multiprocessing` child process creation
+    mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
     """
-
-    def child_process(child_pipe, input_stream):
-        # We are in the child process. Python sets sys.stdin to
-        # open(os.devnull) to prevent our process and its parent from
-        # simultaneously reading from the original stdin. But, we assume
-        # that the parent process is not going to read from it till we
-        # are done with the child, so we undo Python's precaution.
-        if input_stream is not None:
-            sys.stdin = input_stream
-
-        try:
-            if not fake:
-                setup_package(pkg, dirty=dirty)
-            return_value = function()
-            child_pipe.send(return_value)
-
-        except StopPhase as e:
-            # Do not create a full ChildError from this, it's not an error
-            # it's a control statement.
-            child_pipe.send(e)
-        except BaseException:
-            # catch ANYTHING that goes wrong in the child process
-            exc_type, exc, tb = sys.exc_info()
-
-            # Need to unwind the traceback in the child because traceback
-            # objects can't be sent to the parent.
-            tb_string = traceback.format_exc()
-
-            # build up some context from the offending package so we can
-            # show that, too.
-            package_context = get_package_context(tb)
-
-            build_log = None
-            if hasattr(pkg, 'log_path'):
-                build_log = pkg.log_path
-
-            # make a pickleable exception to send to parent.
-            msg = "%s: %s" % (exc_type.__name__, str(exc))
-
-            ce = ChildError(msg,
-                            exc_type.__module__,
-                            exc_type.__name__,
-                            tb_string, build_log, package_context)
-            child_pipe.send(ce)
-
-        finally:
-            child_pipe.close()
-
     parent_pipe, child_pipe = multiprocessing.Pipe()
-    input_stream = None
+    input_multiprocess_fd = None
+
+    serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
+
     try:
         # Forward sys.stdin when appropriate, to allow toggling verbosity
         if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
-            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
+            input_fd = os.dup(sys.stdin.fileno())
+            input_multiprocess_fd = MultiProcessFd(input_fd)
 
-        p = fork_context.Process(
-            target=child_process, args=(child_pipe, input_stream))
+        p = multiprocessing.Process(
+            target=_setup_pkg_and_run,
+            args=(serialized_pkg, function, kwargs, child_pipe,
+                  input_multiprocess_fd))
+
         p.start()
 
     except InstallError as e:
@@ -896,8 +929,8 @@ def child_process(child_pipe, input_stream):
 
     finally:
         # Close the input stream in the parent process
-        if input_stream is not None:
-            input_stream.close()
+        if input_multiprocess_fd is not None:
+            input_multiprocess_fd.close()
 
     child_result = parent_pipe.recv()
     p.join()
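The start-method table in the `start_build_process` docstring above can be checked directly from Python 3's `multiprocessing` module. A small illustrative snippet (not part of the patch; names are arbitrary) showing how to query the platform default and how to pin a method explicitly:

```python
import multiprocessing
import sys


def report():
    print("hello from the child")


if __name__ == "__main__":
    # "fork" on Linux; "spawn" on Windows and, from Python 3.8 on, macOS
    print(sys.platform, multiprocessing.get_start_method())

    # Code that must behave identically everywhere can pin a start method
    # via a context instead of relying on the platform default:
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(target=report)
    p.start()
    p.join()
```

Spack relies on the platform default, which is why the rest of this patch has to remain correct under both fork and spawn.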
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index c407f6908e..f075b893ae 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -214,14 +214,17 @@ def mirror_id(self):
         """BundlePackages don't have a mirror id."""
 
 
-@pattern.composite(interface=FetchStrategy)
-class FetchStrategyComposite(object):
+class FetchStrategyComposite(pattern.Composite):
     """Composite for a FetchStrategy object.
-
-    Implements the GoF composite pattern.
     """
     matches = FetchStrategy.matches
 
+    def __init__(self):
+        super(FetchStrategyComposite, self).__init__([
+            'fetch', 'check', 'expand', 'reset', 'archive', 'cachable',
+            'mirror_id'
+        ])
+
     def source_id(self):
         component_ids = tuple(i.source_id() for i in self)
         if all(component_ids):
diff --git a/lib/spack/spack/hooks/sbang.py b/lib/spack/spack/hooks/sbang.py
index 12a2cbf6ef..d3a6174948 100644
--- a/lib/spack/spack/hooks/sbang.py
+++ b/lib/spack/spack/hooks/sbang.py
@@ -17,9 +17,13 @@
 import spack.store
 
 
-#: Character limit for shebang line. Using Linux's 127 characters
-#: here, as it is the shortest I could find on a modern OS.
-shebang_limit = 127
+#: OS-imposed character limit for shebang line: 127 for Linux; 511 for Mac.
+#: Different Linux distributions have different limits, but 127 is the
+#: smallest among all modern versions.
+if sys.platform == 'darwin':
+    shebang_limit = 511
+else:
+    shebang_limit = 127
 
 
 def sbang_install_path():
diff --git a/lib/spack/spack/installer.py b/lib/spack/spack/installer.py
index 6494f6d922..51919f3729 100644
--- a/lib/spack/spack/installer.py
+++ b/lib/spack/spack/installer.py
@@ -1056,15 +1056,9 @@ def _install_task(self, task, **kwargs):
             task (BuildTask): the installation build task for a package"""
 
         cache_only = kwargs.get('cache_only', False)
-        dirty = kwargs.get('dirty', False)
-        fake = kwargs.get('fake', False)
-        install_source = kwargs.get('install_source', False)
-        keep_stage = kwargs.get('keep_stage', False)
-        skip_patch = kwargs.get('skip_patch', False)
         tests = kwargs.get('tests', False)
         unsigned = kwargs.get('unsigned', False)
         use_cache = kwargs.get('use_cache', True)
-        verbose = kwargs.get('verbose', False)
         full_hash_match = kwargs.get('full_hash_match', False)
 
         pkg = task.pkg
@@ -1087,110 +1081,6 @@ def _install_task(self, task, **kwargs):
 
         pkg.run_tests = (tests is True or tests and pkg.name in tests)
 
-        pid = '{0}: '.format(self.pid) if tty.show_pid() else ''
-        pre = '{0}{1}:'.format(pid, pkg.name)
-
-        def build_process():
-            """
-            This function implements the process forked for each build.
-
-            It has its own process and python module space set up by
-            build_environment.fork().
-
-            This function's return value is returned to the parent process.
-            """
-            start_time = time.time()
-            if not fake:
-                if not skip_patch:
-                    pkg.do_patch()
-                else:
-                    pkg.do_stage()
-
-            pkg_id = package_id(pkg)
-            tty.debug('{0} Building {1} [{2}]'
-                      .format(pre, pkg_id, pkg.build_system_class))
-
-            # get verbosity from do_install() parameter or saved value
-            echo = verbose
-            if spack.package.PackageBase._verbose is not None:
-                echo = spack.package.PackageBase._verbose
-
-            pkg.stage.keep = keep_stage
-
-            # parent process already has a prefix write lock
-            with pkg.stage:
-                # Run the pre-install hook in the child process after
-                # the directory is created.
-                spack.hooks.pre_install(pkg.spec)
-                if fake:
-                    _do_fake_install(pkg)
-                else:
-                    source_path = pkg.stage.source_path
-                    if install_source and os.path.isdir(source_path):
-                        src_target = os.path.join(pkg.spec.prefix, 'share',
-                                                  pkg.name, 'src')
-                        tty.debug('{0} Copying source to {1}'
-                                  .format(pre, src_target))
-                        fs.install_tree(pkg.stage.source_path, src_target)
-
-                    # Do the real install in the source directory.
-                    with fs.working_dir(pkg.stage.source_path):
-                        # Save the build environment in a file before building.
-                        dump_environment(pkg.env_path)
-
-                        for attr in ('configure_args', 'cmake_args'):
-                            try:
-                                configure_args = getattr(pkg, attr)()
-                                configure_args = ' '.join(configure_args)
-
-                                with open(pkg.configure_args_path, 'w') as \
-                                        args_file:
-                                    args_file.write(configure_args)
-
-                                break
-                            except Exception:
-                                pass
-
-                        # cache debug settings
-                        debug_level = tty.debug_level()
-
-                        # Spawn a daemon that reads from a pipe and redirects
-                        # everything to log_path
-                        with log_output(pkg.log_path, echo, True) as logger:
-                            for phase_name, phase_attr in zip(
-                                    pkg.phases, pkg._InstallPhase_phases):
-
-                                with logger.force_echo():
-                                    inner_debug_level = tty.debug_level()
-                                    tty.set_debug(debug_level)
-                                    tty.msg("{0} Executing phase: '{1}'"
-                                            .format(pre, phase_name))
-                                    tty.set_debug(inner_debug_level)
-
-                                # Redirect stdout and stderr to daemon pipe
-                                phase = getattr(pkg, phase_attr)
-                                phase(pkg.spec, pkg.prefix)
-
-                echo = logger.echo
-                log(pkg)
-
-            # Run post install hooks before build stage is removed.
-            spack.hooks.post_install(pkg.spec)
-
-            # Stop the timer
-            pkg._total_time = time.time() - start_time
-            build_time = pkg._total_time - pkg._fetch_time
-
-            tty.debug('{0} Successfully installed {1}'
-                      .format(pre, pkg_id),
-                      'Fetch: {0}. Build: {1}. Total: {2}.'
-                      .format(_hms(pkg._fetch_time), _hms(build_time),
-                              _hms(pkg._total_time)))
-            _print_installed_pkg(pkg.prefix)
-
-            # preserve verbosity across runs
-            return echo
-
         # hook that allows tests to inspect the Package before installation
         # see unit_test_check() docs.
         if not pkg.unit_test_check():
@@ -1199,10 +1089,12 @@ def build_process():
         try:
             self._setup_install_dir(pkg)
 
-            # Fork a child to do the actual installation.
+            # Create a child process to do the actual installation.
             # Preserve verbosity settings across installs.
-            spack.package.PackageBase._verbose = spack.build_environment.fork(
-                pkg, build_process, dirty=dirty, fake=fake)
+            spack.package.PackageBase._verbose = (
+                spack.build_environment.start_build_process(
+                    pkg, build_process, kwargs)
+            )
 
             # Note: PARENT of the build process adds the new package to
             # the database, so that we don't need to re-read from file.
@@ -1216,9 +1108,9 @@ def build_process():
         except spack.build_environment.StopPhase as e:
             # A StopPhase exception means that do_install was asked to
             # stop early from clients, and is not an error at this point
-            pre = '{0}'.format(self.pid) if tty.show_pid() else ''
+            pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
             tty.debug('{0}{1}'.format(pid, str(e)))
-            tty.debug('Package stage directory : {0}'
+            tty.debug('Package stage directory: {0}'
                       .format(pkg.stage.source_path))
 
     _install_task.__doc__ += install_args_docstring
@@ -1666,6 +1558,117 @@ def spec(self):
         return self.pkg.spec
 
 
+def build_process(pkg, kwargs):
+    """Perform the installation/build of the package.
+
+    This runs in a separate child process, and has its own process and
+    python module space set up by build_environment.start_build_process().
+
+    This function's return value is returned to the parent process.
+    """
+    keep_stage = kwargs.get('keep_stage', False)
+    install_source = kwargs.get('install_source', False)
+    skip_patch = kwargs.get('skip_patch', False)
+    verbose = kwargs.get('verbose', False)
+    fake = kwargs.get('fake', False)
+    unmodified_env = kwargs.get('unmodified_env', {})
+
+    start_time = time.time()
+    if not fake:
+        if not skip_patch:
+            pkg.do_patch()
+        else:
+            pkg.do_stage()
+
+    pid = '{0}: '.format(pkg.pid) if tty.show_pid() else ''
+    pre = '{0}{1}:'.format(pid, pkg.name)
+    pkg_id = package_id(pkg)
+
+    tty.debug('{0} Building {1} [{2}]'
+              .format(pre, pkg_id, pkg.build_system_class))
+
+    # get verbosity from do_install() parameter or saved value
+    echo = verbose
+    if spack.package.PackageBase._verbose is not None:
+        echo = spack.package.PackageBase._verbose
+
+    pkg.stage.keep = keep_stage
+    with pkg.stage:
+        # Run the pre-install hook in the child process after
+        # the directory is created.
+        spack.hooks.pre_install(pkg.spec)
+        if fake:
+            _do_fake_install(pkg)
+        else:
+            source_path = pkg.stage.source_path
+            if install_source and os.path.isdir(source_path):
+                src_target = os.path.join(pkg.spec.prefix, 'share',
+                                          pkg.name, 'src')
+                tty.debug('{0} Copying source to {1}'
+                          .format(pre, src_target))
+                fs.install_tree(pkg.stage.source_path, src_target)
+
+            # Do the real install in the source directory.
+            with fs.working_dir(pkg.stage.source_path):
+                # Save the build environment in a file before building.
+                dump_environment(pkg.env_path)
+
+                for attr in ('configure_args', 'cmake_args'):
+                    try:
+                        configure_args = getattr(pkg, attr)()
+                        configure_args = ' '.join(configure_args)
+
+                        with open(pkg.configure_args_path, 'w') as \
+                                args_file:
+                            args_file.write(configure_args)
+
+                        break
+                    except Exception:
+                        pass
+
+                # cache debug settings
+                debug_level = tty.debug_level()
+
+                # Spawn a daemon that reads from a pipe and redirects
+                # everything to log_path
+                with log_output(pkg.log_path, echo, True,
+                                env=unmodified_env) as logger:
+
+                    for phase_name, phase_attr in zip(
+                            pkg.phases, pkg._InstallPhase_phases):
+
+                        with logger.force_echo():
+                            inner_debug_level = tty.debug_level()
+                            tty.set_debug(debug_level)
+                            tty.msg("{0} Executing phase: '{1}'"
+                                    .format(pre, phase_name))
+                            tty.set_debug(inner_debug_level)
+
+                        # Redirect stdout and stderr to daemon pipe
+                        phase = getattr(pkg, phase_attr)
+                        phase(pkg.spec, pkg.prefix)
+
+        echo = logger.echo
+        log(pkg)
+
+    # Run post install hooks before build stage is removed.
+    spack.hooks.post_install(pkg.spec)
+
+    # Stop the timer
+    pkg._total_time = time.time() - start_time
+    build_time = pkg._total_time - pkg._fetch_time
+
+    tty.debug('{0} Successfully installed {1}'
+              .format(pre, pkg_id),
+              'Fetch: {0}. Build: {1}. Total: {2}.'
+              .format(_hms(pkg._fetch_time), _hms(build_time),
+                      _hms(pkg._total_time)))
+    _print_installed_pkg(pkg.prefix)
+
+    # preserve verbosity across runs
+    return echo
+
+
 class BuildTask(object):
     """Class for representing the build task for a package."""
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index a4e91fc186..a692d04052 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -901,6 +901,10 @@ def _make_resource_stage(self, root_stage, fetcher, resource):
                               path=self.path)
         return stage
 
+    def _download_search(self):
+        dynamic_fetcher = fs.from_list_url(self)
+        return [dynamic_fetcher] if dynamic_fetcher else []
+
     def _make_root_stage(self, fetcher):
         # Construct a mirror path (TODO: get this out of package.py)
         mirror_paths = spack.mirror.mirror_archive_paths(
@@ -912,12 +916,8 @@ def _make_root_stage(self, fetcher):
         stage_name = "{0}{1}-{2}-{3}".format(stage_prefix, s.name, s.version,
                                              s.dag_hash())
 
-        def download_search():
-            dynamic_fetcher = fs.from_list_url(self)
-            return [dynamic_fetcher] if dynamic_fetcher else []
-
         stage = Stage(fetcher, mirror_paths=mirror_paths, name=stage_name,
-                      path=self.path, search_fn=download_search)
+                      path=self.path, search_fn=self._download_search)
         return stage
 
     def _make_stage(self):
diff --git a/lib/spack/spack/repo.py b/lib/spack/spack/repo.py
index 8d31f2cf49..d9dddce6b6 100644
--- a/lib/spack/spack/repo.py
+++ b/lib/spack/spack/repo.py
@@ -1164,14 +1164,14 @@ def create_or_construct(path, namespace=None):
     return Repo(path)
 
 
-def _path():
+def _path(repo_dirs=None):
     """Get the singleton RepoPath instance for Spack.
 
     Create a RepoPath, add it to sys.meta_path, and return it.
 
     TODO: consider not making this a singleton.
""" - repo_dirs = spack.config.get('repos') + repo_dirs = repo_dirs or spack.config.get('repos') if not repo_dirs: raise NoRepoConfiguredError( "Spack configuration contains no package repositories.") diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py index 256e3edc83..9a13a7db46 100644 --- a/lib/spack/spack/spec.py +++ b/lib/spack/spack/spec.py @@ -919,6 +919,12 @@ def __set__(self, instance, value): raise AttributeError(msg.format(cls_name, self.attribute_name)) +# Represents a query state in a BuildInterface object +QueryState = collections.namedtuple( + 'QueryState', ['name', 'extra_parameters', 'isvirtual'] +) + + class SpecBuildInterface(lang.ObjectWrapper): command = ForwardQueryToPackage( 'command', @@ -938,11 +944,6 @@ class SpecBuildInterface(lang.ObjectWrapper): def __init__(self, spec, name, query_parameters): super(SpecBuildInterface, self).__init__(spec) - # Represents a query state in a BuildInterface object - QueryState = collections.namedtuple( - 'QueryState', ['name', 'extra_parameters', 'isvirtual'] - ) - is_virtual = Spec.is_virtual(name) self.last_query = QueryState( name=name, diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py index f254b60fd5..719d408d84 100644 --- a/lib/spack/spack/stage.py +++ b/lib/spack/spack/stage.py @@ -690,11 +690,7 @@ def _add_to_root_stage(self): install(src, destination_path) -@pattern.composite(method_list=[ - 'fetch', 'create', 'created', 'check', 'expand_archive', 'restage', - 'destroy', 'cache_local', 'cache_mirror', 'steal_source', - 'managed_by_spack']) -class StageComposite: +class StageComposite(pattern.Composite): """Composite for Stage type objects. The first item in this composite is considered to be the root package, and operations that return a value are forwarded to it.""" @@ -702,6 +698,12 @@ class StageComposite: # __enter__ and __exit__ delegate to all stages in the composite. # + def __init__(self): + super(StageComposite, self).__init__([ + 'fetch', 'create', 'created', 'check', 'expand_archive', 'restage', + 'destroy', 'cache_local', 'cache_mirror', 'steal_source', + 'managed_by_spack']) + def __enter__(self): for item in self: item.__enter__() diff --git a/lib/spack/spack/store.py b/lib/spack/spack/store.py index 756a0a2e03..75c3969362 100644 --- a/lib/spack/spack/store.py +++ b/lib/spack/spack/store.py @@ -103,10 +103,23 @@ def _store(): #: Singleton store instance store = llnl.util.lang.Singleton(_store) + +def _store_root(): + return store.root + + +def _store_db(): + return store.db + + +def _store_layout(): + return store.layout + + # convenience accessors for parts of the singleton store -root = llnl.util.lang.LazyReference(lambda: store.root) -db = llnl.util.lang.LazyReference(lambda: store.db) -layout = llnl.util.lang.LazyReference(lambda: store.layout) +root = llnl.util.lang.LazyReference(_store_root) +db = llnl.util.lang.LazyReference(_store_db) +layout = llnl.util.lang.LazyReference(_store_layout) def retrieve_upstream_dbs(): diff --git a/lib/spack/spack/subprocess_context.py b/lib/spack/spack/subprocess_context.py new file mode 100644 index 0000000000..b8e157740f --- /dev/null +++ b/lib/spack/spack/subprocess_context.py @@ -0,0 +1,149 @@ +# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other +# Spack Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: (Apache-2.0 OR MIT) + +""" +This module handles transmission of Spack state to child processes started +using the 'spawn' start method. 
+subprocess and require transmitting the Package object (in such a way
+that the repository is available for importing when it is deserialized);
+installations performed in Spack unit tests may include additional
+modifications to global state in memory that must be replicated in the
+child process.
+"""
+
+from types import ModuleType
+
+import pickle
+import pydoc
+import io
+import sys
+import multiprocessing
+
+import spack.architecture
+import spack.config
+import spack.repo
+
+
+_serialize = sys.version_info >= (3, 8) and sys.platform == 'darwin'
+
+
+patches = None
+
+
+def append_patch(patch):
+    global patches
+    if not patches:
+        patches = list()
+    patches.append(patch)
+
+
+def serialize(obj):
+    serialized_obj = io.BytesIO()
+    pickle.dump(obj, serialized_obj)
+    serialized_obj.seek(0)
+    return serialized_obj
+
+
+class SpackTestProcess(object):
+    def __init__(self, fn):
+        self.fn = fn
+
+    def _restore_and_run(self, fn, test_state):
+        test_state.restore()
+        fn()
+
+    def create(self):
+        test_state = TestState()
+        return multiprocessing.Process(
+            target=self._restore_and_run,
+            args=(self.fn, test_state))
+
+
+class PackageInstallContext(object):
+    """Captures the in-memory process state of a package installation that
+    needs to be transmitted to a child process.
+    """
+    def __init__(self, pkg):
+        if _serialize:
+            self.serialized_pkg = serialize(pkg)
+        else:
+            self.pkg = pkg
+        self.test_state = TestState()
+
+    def restore(self):
+        self.test_state.restore()
+        if _serialize:
+            return pickle.load(self.serialized_pkg)
+        else:
+            return self.pkg
+
+
+class TestState(object):
+    """Spack tests may modify in-memory state that is normally read from
+    disk; this object serializes that state so it can be applied to a
+    subprocess. This isn't needed outside of a testing environment, but
+    the logic is designed to behave the same inside and outside of tests.
+    """
+    def __init__(self):
+        if _serialize:
+            self.repo_dirs = list(r.root for r in spack.repo.path.repos)
+            self.config = spack.config.config
+            self.platform = spack.architecture.platform
+            self.test_patches = store_patches()
+
+        # TODO: transfer spack.store.store? Note that you should not
+        # transfer spack.store.store and spack.store.db: 'db' is a
+        # shortcut that accesses the store (so transferring both can
+        # create an inconsistency). Some tests set 'db' directly, and
+        # others set 'store'.
+
+    def restore(self):
+        if _serialize:
+            spack.repo.path = spack.repo._path(self.repo_dirs)
+            spack.config.config = self.config
+            spack.architecture.platform = self.platform
+            self.test_patches.restore()
+
+
+class TestPatches(object):
+    def __init__(self, module_patches, class_patches):
+        self.module_patches = list(
+            (x, y, serialize(z)) for (x, y, z) in module_patches)
+        self.class_patches = list(
+            (x, y, serialize(z)) for (x, y, z) in class_patches)
+
+    def restore(self):
+        for module_name, attr_name, value in self.module_patches:
+            value = pickle.load(value)
+            module = __import__(module_name, fromlist=[module_name])
+            setattr(module, attr_name, value)
+        for class_fqn, attr_name, value in self.class_patches:
+            value = pickle.load(value)
+            cls = pydoc.locate(class_fqn)
+            setattr(cls, attr_name, value)
+
+
+def store_patches():
+    global patches
+    module_patches = list()
+    class_patches = list()
+    if not patches:
+        return TestPatches(list(), list())
+    for target, name, _ in patches:
+        if isinstance(target, ModuleType):
+            new_val = getattr(target, name)
+            module_name = target.__name__
+            module_patches.append((module_name, name, new_val))
+        elif isinstance(target, type):
+            new_val = getattr(target, name)
+            class_fqn = '.'.join([target.__module__, target.__name__])
+            class_patches.append((class_fqn, name, new_val))
+
+    return TestPatches(module_patches, class_patches)
+
+
+def clear_patches():
+    global patches
+    patches = None
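The reason `TestState`/`TestPatches` (and the monkeypatch recording in `conftest.py` below) are needed at all: a spawned child re-imports every module from disk, so state that only ever lived in the parent's memory silently reverts. A minimal demonstration of that behavior, with made-up names and no Spack dependencies (POSIX only, since "fork" is unavailable on Windows) — it prints "patched" under fork but "original" under spawn:

```python
import multiprocessing

SETTING = "original"


def show():
    # A spawned child re-imports this file, re-running the assignment
    # above, so the parent's in-memory patch below is lost.
    print("child sees:", SETTING)


if __name__ == "__main__":
    SETTING = "patched"
    for method in ("fork", "spawn"):
        p = multiprocessing.get_context(method).Process(target=show)
        p.start()
        p.join()
```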
diff --git a/lib/spack/spack/test/bindist.py b/lib/spack/spack/test/bindist.py
index 3949df117e..ef54e658f0 100644
--- a/lib/spack/spack/test/bindist.py
+++ b/lib/spack/spack/test/bindist.py
@@ -149,9 +149,11 @@ def install_dir_default_layout(tmpdir):
     spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
     spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                              path_scheme=def_install_path_scheme)  # noqa: E501
-    yield spack.store
-    spack.store.store = real_store
-    spack.store.layout = real_layout
+    try:
+        yield spack.store
+    finally:
+        spack.store.store = real_store
+        spack.store.layout = real_layout
 
 
 @pytest.fixture(scope='function')
@@ -162,9 +164,11 @@ def install_dir_non_default_layout(tmpdir):
     spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
     spack.store.layout = YamlDirectoryLayout(str(tmpdir.join('opt')),
                                              path_scheme=ndef_install_path_scheme)  # noqa: E501
-    yield spack.store
-    spack.store.store = real_store
-    spack.store.layout = real_layout
+    try:
+        yield spack.store
+    finally:
+        spack.store.store = real_store
+        spack.store.layout = real_layout
 
 
 args = ['strings', 'file']
@@ -582,6 +586,13 @@ def test_built_spec_cache(tmpdir,
         mirror.mirror(mparser, margs)
 
 
+def fake_full_hash(spec):
+    # Generate an arbitrary hash that is intended to be different than
+    # whatever a Spec reported before (to test actions that trigger when
+    # the hash changes)
+    return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
+
+
 def test_spec_needs_rebuild(install_mockery_mutable_config, mock_packages,
                             mock_fetch, monkeypatch, tmpdir):
     """Make sure needs_rebuild properly compares remote full_hash
@@ -606,9 +617,6 @@ def test_spec_needs_rebuild(install_mockery_mutable_config, mock_packages,
     assert not rebuild
 
     # Now monkey patch Spec to change the full hash on the package
-    def fake_full_hash(spec):
-        print('fake_full_hash')
-        return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
     monkeypatch.setattr(spack.spec.Spec, 'full_hash', fake_full_hash)
 
     rebuild = bindist.needs_rebuild(s, mirror_url, rebuild_on_errors=True)
diff --git a/lib/spack/spack/test/cmd/dev_build.py b/lib/spack/spack/test/cmd/dev_build.py
index 111e500dcd..f11025b7b1 100644
--- a/lib/spack/spack/test/cmd/dev_build.py
+++ b/lib/spack/spack/test/cmd/dev_build.py
@@ -108,20 +108,24 @@ def test_dev_build_before_until(tmpdir, mock_packages, install_mockery):
                   'dev-build-test-install@0.0.0')
 
 
+def print_spack_cc(*args):
+    # Eat arguments and print environment variable to test
+    print(os.environ.get('CC', ''))
+
+
+# `module unload cray-libsci` in test environment causes failure
+# It does not fail for actual installs
+# build_environment.py imports module directly, so we monkeypatch it there
+# rather than in module_cmd
+def mock_module_noop(*args):
+    pass
+
+
 def test_dev_build_drop_in(tmpdir, mock_packages, monkeypatch,
                            install_mockery):
-    def print_spack_cc(*args):
-        # Eat arguments and print environment variable to test
-        print(os.environ.get('CC', ''))
     monkeypatch.setattr(os, 'execvp', print_spack_cc)
 
-    # `module unload cray-libsci` in test environment causes failure
-    # It does not fail for actual installs
-    # build_environment.py imports module directly, so we monkeypatch it there
-    # rather than in module_cmd
-    def module(*args):
-        pass
-    monkeypatch.setattr(spack.build_environment, 'module', module)
+    monkeypatch.setattr(spack.build_environment, 'module', mock_module_noop)
 
     with tmpdir.as_cwd():
         output = dev_build('-b', 'edit', '--drop-in', 'sh',
diff --git a/lib/spack/spack/test/cmd/install.py b/lib/spack/spack/test/cmd/install.py
index 4af3e8a339..2d7571ef4b 100644
--- a/lib/spack/spack/test/cmd/install.py
+++ b/lib/spack/spack/test/cmd/install.py
@@ -811,6 +811,13 @@ def test_install_fails_no_args_suggests_env_activation(tmpdir):
     assert 'using the `spack.yaml` in this directory' in output
 
 
+def fake_full_hash(spec):
+    # Generate an arbitrary hash that is intended to be different than
+    # whatever a Spec reported before (to test actions that trigger when
+    # the hash changes)
+    return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
+
+
 def test_cache_install_full_hash_match(
         install_mockery_mutable_config, mock_packages,
         mock_fetch, mock_archive, mutable_config, monkeypatch, tmpdir):
@@ -843,9 +850,6 @@ def test_cache_install_full_hash_match(
     uninstall('-y', s.name)
 
     # Now monkey patch Spec to change the full hash on the package
-    def fake_full_hash(spec):
-        print('fake_full_hash')
-        return 'tal4c7h4z0gqmixb1eqa92mjoybxn5l6'
     monkeypatch.setattr(spack.spec.Spec, 'full_hash', fake_full_hash)
 
     # Check that even if the full hash changes, we install from binary when
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
index f43d6933ba..af77684329 100644
--- a/lib/spack/spack/test/conftest.py
+++ b/lib/spack/spack/test/conftest.py
@@ -34,6 +34,7 @@
 import spack.stage
 import spack.util.executable
 import spack.util.gpg
+import spack.subprocess_context
 import spack.util.spack_yaml as syaml
 
 from spack.util.pattern import Bunch
@@ -41,11 +42,34 @@
 from spack.fetch_strategy import FetchError
 
 
+@pytest.fixture(autouse=True)
+def clear_recorded_monkeypatches():
+    yield
+    spack.subprocess_context.clear_patches()
+
+
+@pytest.fixture(scope='session', autouse=True)
+def record_monkeypatch_setattr():
+    import _pytest
+    saved_setattr = _pytest.monkeypatch.MonkeyPatch.setattr
+
+    def record_setattr(cls, target, name, value, *args, **kwargs):
+        spack.subprocess_context.append_patch((target, name, value))
+        saved_setattr(cls, target, name, value, *args, **kwargs)
+
+    _pytest.monkeypatch.MonkeyPatch.setattr = record_setattr
+    try:
+        yield
+    finally:
+        _pytest.monkeypatch.MonkeyPatch.setattr = saved_setattr
+
+
+def _can_access(path, perms):
+    return False
+
+
 @pytest.fixture
 def no_path_access(monkeypatch):
-    def _can_access(path, perms):
-        return False
-
     monkeypatch.setattr(os, 'access', _can_access)
 
 
@@ -70,6 +94,10 @@ def clean_user_environment():
         ev.activate(active)
 
 
+def _verify_executables_noop(*args):
+    return None
+
+
 #
 # Disable checks on compiler executable existence
 #
@@ -86,7 +114,7 @@ def mock_compiler_executable_verification(request, monkeypatch):
     if 'enable_compiler_verification' not in request.keywords:
         monkeypatch.setattr(spack.compiler.Compiler,
                             'verify_executables',
-                            lambda x: None)
+                            _verify_executables_noop)
 
 
 # Hooks to add command line options or set other custom behaviors.
@@ -245,25 +273,27 @@ def check_for_leftover_stage_files(request, mock_stage, ignore_stage_files):
         assert not files_in_stage
 
 
+class MockCache(object):
+    def store(self, copy_cmd, relative_dest):
+        pass
+
+    def fetcher(self, target_path, digest, **kwargs):
+        return MockCacheFetcher()
+
+
+class MockCacheFetcher(object):
+    def fetch(self):
+        raise FetchError('Mock cache always fails for tests')
+
+    def __str__(self):
+        return "[mock fetch cache]"
+
+
 @pytest.fixture(autouse=True)
 def mock_fetch_cache(monkeypatch):
     """Substitutes spack.paths.fetch_cache with a mock object that does nothing
     and raises on fetch.
     """
-    class MockCache(object):
-        def store(self, copy_cmd, relative_dest):
-            pass
-
-        def fetcher(self, target_path, digest, **kwargs):
-            return MockCacheFetcher()
-
-    class MockCacheFetcher(object):
-        def fetch(self):
-            raise FetchError('Mock cache always fails for tests')
-
-        def __str__(self):
-            return "[mock fetch cache]"
-
     monkeypatch.setattr(spack.caches, 'fetch_cache', MockCache())
 
 
@@ -286,7 +316,15 @@ def _skip_if_missing_executables(request):
 # FIXME: session-scope. Anyhow doing it is not easy, as it seems
 # FIXME: there's some weird interaction with compilers during concretization.
 spack.architecture.real_platform = spack.architecture.platform
-spack.architecture.platform = lambda: spack.platforms.test.Test()
+
+
+def test_platform():
+    return spack.platforms.test.Test()
+
+
+spack.architecture.platform = test_platform
+
+
 # FIXME: Since we change the architecture above, we have to (re)initialize
 # FIXME: the config singleton. If it gets initialized too early with the
 # FIXME: actual architecture, tests will fail.
@@ -356,14 +394,15 @@ def mock_repo_path():
     yield spack.repo.RepoPath(spack.paths.mock_packages_path)
 
 
+def _pkg_install_fn(pkg, spec, prefix):
+    # sanity_check_prefix requires something in the install directory
+    mkdirp(prefix.bin)
+
+
 @pytest.fixture
 def mock_pkg_install(monkeypatch):
-    def _pkg_install_fn(pkg, spec, prefix):
-        # sanity_check_prefix requires something in the install directory
-        mkdirp(prefix.bin)
-
-    monkeypatch.setattr(spack.package.PackageBase, 'install', _pkg_install_fn,
-                        raising=False)
+    monkeypatch.setattr(spack.package.PackageBase, 'install',
+                        _pkg_install_fn, raising=False)
 
 
 @pytest.fixture(scope='function')
@@ -580,10 +619,10 @@ def mock_store(tmpdir_factory, mock_repo_path, mock_configuration,
 
 
 @pytest.fixture(scope='function')
-def database(mock_store, mock_packages, config):
+def database(mock_store, mock_packages, config, monkeypatch):
     """This activates the mock store, packages, AND config."""
-    with use_store(mock_store):
-        yield mock_store.db
+    monkeypatch.setattr(spack.store, 'store', mock_store)
+    yield mock_store.db
 
     # Force reading the database again between tests
     mock_store.db.last_seen_verifier = ''
@@ -631,6 +670,10 @@ def dirs_with_libfiles(tmpdir_factory):
     yield lib_to_dirs, all_dirs
 
 
+def _compiler_link_paths_noop(*args):
+    return []
+
+
 @pytest.fixture(scope='function', autouse=True)
 def disable_compiler_execution(monkeypatch, request):
     """
@@ -641,31 +684,27 @@ def disable_compiler_execution(monkeypatch, request):
     If a test is marked in that way this is a no-op."""
     if 'enable_compiler_link_paths' not in request.keywords:
-        def noop(*args):
-            return []
-
         # Compiler.determine_implicit_rpaths actually runs the compiler. So
         # replace that function with a noop that simulates finding no implicit
         # RPATHs
         monkeypatch.setattr(
             spack.compiler.Compiler,
             '_get_compiler_link_paths',
-            noop
+            _compiler_link_paths_noop
         )
 
 
 @pytest.fixture(scope='function')
 def install_mockery(tmpdir, config, mock_packages, monkeypatch):
     """Hooks a fake install directory, DB, and stage directory into Spack."""
-    real_store = spack.store.store
-    spack.store.store = spack.store.Store(str(tmpdir.join('opt')))
+    monkeypatch.setattr(
+        spack.store, 'store', spack.store.Store(str(tmpdir.join('opt'))))
 
     # We use a fake package, so temporarily disable checksumming
     with spack.config.override('config:checksum', False):
         yield
 
     tmpdir.join('opt').remove()
-    spack.store.store = real_store
 
     # Also wipe out any cached prefix failure locks (associated with
    # the session-scoped mock archive).
@@ -687,31 +726,24 @@ def install_mockery_mutable_config(
     also need to modify configuration (and hence would want to use 'mutable
     config'): 'install_mockery' does not support this.
""" - real_store = spack.store.store - spack.store.store = spack.store.Store(str(tmpdir.join('opt'))) + monkeypatch.setattr( + spack.store, 'store', spack.store.Store(str(tmpdir.join('opt')))) # We use a fake package, so temporarily disable checksumming with spack.config.override('config:checksum', False): yield tmpdir.join('opt').remove() - spack.store.store = real_store @pytest.fixture() -def mock_fetch(mock_archive): +def mock_fetch(mock_archive, monkeypatch): """Fake the URL for a package so it downloads from a file.""" - fetcher = FetchStrategyComposite() - fetcher.append(URLFetchStrategy(mock_archive.url)) + mock_fetcher = FetchStrategyComposite() + mock_fetcher.append(URLFetchStrategy(mock_archive.url)) - @property - def fake_fn(self): - return fetcher - - orig_fn = spack.package.PackageBase.fetcher - spack.package.PackageBase.fetcher = fake_fn - yield - spack.package.PackageBase.fetcher = orig_fn + monkeypatch.setattr( + spack.package.PackageBase, 'fetcher', mock_fetcher) class MockLayout(object): @@ -737,6 +769,47 @@ def create_layout(root): yield create_layout +class MockConfig(object): + def __init__(self, configuration, writer_key): + self._configuration = configuration + self.writer_key = writer_key + + def configuration(self): + return self._configuration + + def writer_configuration(self): + return self.configuration()[self.writer_key] + + +class ConfigUpdate(object): + def __init__(self, root_for_conf, writer_mod, writer_key, monkeypatch): + self.root_for_conf = root_for_conf + self.writer_mod = writer_mod + self.writer_key = writer_key + self.monkeypatch = monkeypatch + + def __call__(self, filename): + file = os.path.join(self.root_for_conf, filename + '.yaml') + with open(file) as f: + mock_config = MockConfig(syaml.load_config(f), self.writer_key) + + self.monkeypatch.setattr( + spack.modules.common, + 'configuration', + mock_config.configuration + ) + self.monkeypatch.setattr( + self.writer_mod, + 'configuration', + mock_config.writer_configuration + ) + self.monkeypatch.setattr( + self.writer_mod, + 'configuration_registry', + {} + ) + + @pytest.fixture() def module_configuration(monkeypatch, request): """Reads the module configuration file from the mock ones prepared @@ -753,34 +826,7 @@ def module_configuration(monkeypatch, request): spack.paths.test_path, 'data', 'modules', writer_key ) - def _impl(filename): - - file = os.path.join(root_for_conf, filename + '.yaml') - with open(file) as f: - configuration = syaml.load_config(f) - - def mock_config_function(): - return configuration - - def writer_key_function(): - return mock_config_function()[writer_key] - - monkeypatch.setattr( - spack.modules.common, - 'configuration', - mock_config_function - ) - monkeypatch.setattr( - writer_mod, - 'configuration', - writer_key_function - ) - monkeypatch.setattr( - writer_mod, - 'configuration_registry', - {} - ) - return _impl + return ConfigUpdate(root_for_conf, writer_mod, writer_key, monkeypatch) @pytest.fixture() diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py index a97243ae9c..473573fa2e 100644 --- a/lib/spack/spack/test/database.py +++ b/lib/spack/spack/test/database.py @@ -23,7 +23,6 @@ import llnl.util.lock as lk from llnl.util.tty.colify import colify -from llnl.util.lang import fork_context import spack.repo import spack.store @@ -319,7 +318,7 @@ def _check_merkleiness(): def _check_db_sanity(database): - """Utiilty function to check db against install layout.""" + """Utility function to check db against install layout.""" 
     pkg_in_layout = sorted(spack.store.layout.all_specs())
     actual = sorted(database.query())
 
@@ -517,14 +516,20 @@ def test_026_reindex_after_deprecate(mutable_database):
     _check_db_sanity(mutable_database)
 
 
-def test_030_db_sanity_from_another_process(mutable_database):
-    def read_and_modify():
+class ReadModify(object):
+    """Provide a function which can execute in a separate process that removes
+    a spec from the database.
+    """
+    def __call__(self):
         # check that other process can read DB
-        _check_db_sanity(mutable_database)
-        with mutable_database.write_transaction():
+        _check_db_sanity(spack.store.db)
+        with spack.store.db.write_transaction():
             _mock_remove('mpileaks ^zmpi')
 
-    p = fork_context.Process(target=read_and_modify, args=())
+
+def test_030_db_sanity_from_another_process(mutable_database):
+    spack_process = spack.subprocess_context.SpackTestProcess(ReadModify())
+    p = spack_process.create()
     p.start()
     p.join()
 
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index 47ec06bbc4..61475e4dc6 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -19,6 +19,11 @@
     _spack_configure_argsfile)
 
 
+def find_nothing(*args):
+    raise spack.repo.UnknownPackageError(
+        'Repo package access is disabled for test')
+
+
 def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
     # Get a basic concrete spec for the trivial install package.
     spec = Spec('trivial-install-test-package')
@@ -28,10 +33,6 @@ def test_install_and_uninstall(install_mockery, mock_fetch, monkeypatch):
     # Get the package
     pkg = spec.package
 
-    def find_nothing(*args):
-        raise spack.repo.UnknownPackageError(
-            'Repo package access is disabled for test')
-
     try:
         pkg.do_install()
 
@@ -83,23 +84,25 @@ def create(self):
         self.wrapped_stage.create()
 
     def __getattr__(self, attr):
+        if attr == 'wrapped_stage':
+            # This attribute may not be defined at some point during unpickling
+            raise AttributeError()
         return getattr(self.wrapped_stage, attr)
 
 
 def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
     spec = Spec('canfail').concretized()
     pkg = spack.repo.get(spec)
-    remove_prefix = spack.package.Package.remove_prefix
     instance_rm_prefix = pkg.remove_prefix
 
     try:
         pkg.succeed = False
-        spack.package.Package.remove_prefix = mock_remove_prefix
+        pkg.remove_prefix = mock_remove_prefix
         with pytest.raises(MockInstallError):
             pkg.do_install()
         assert os.path.isdir(pkg.prefix)
         rm_prefix_checker = RemovePrefixChecker(instance_rm_prefix)
-        spack.package.Package.remove_prefix = rm_prefix_checker.remove_prefix
+        pkg.remove_prefix = rm_prefix_checker.remove_prefix
 
         # must clear failure markings for the package before re-installing it
         spack.store.db.clear_failure(spec, True)
@@ -113,7 +116,7 @@ def test_partial_install_delete_prefix_and_stage(install_mockery, mock_fetch):
         assert pkg.installed
 
     finally:
-        spack.package.Package.remove_prefix = remove_prefix
+        pkg.remove_prefix = instance_rm_prefix
 
 
 def test_dont_add_patches_to_installed_package(install_mockery, mock_fetch):
@@ -181,7 +184,8 @@ def test_flatten_deps(
 
 
 def test_installed_upstream_external(
-        tmpdir_factory, install_mockery, mock_fetch, gen_mock_layout):
+        tmpdir_factory, install_mockery, mock_fetch, gen_mock_layout,
+        monkeypatch):
     """Check that when a dependency package is recorded as installed
     in an upstream database that it is not reinstalled.
""" @@ -194,29 +198,26 @@ def test_installed_upstream_external( dependency.concretize() prepared_db.add(dependency, upstream_layout) - try: - original_db = spack.store.db - downstream_db_root = str( - tmpdir_factory.mktemp('mock_downstream_db_root')) - spack.store.db = spack.database.Database( - downstream_db_root, upstream_dbs=[prepared_db]) - dependent = spack.spec.Spec('externaltest') - dependent.concretize() + downstream_db_root = str( + tmpdir_factory.mktemp('mock_downstream_db_root')) + db_for_test = spack.database.Database( + downstream_db_root, upstream_dbs=[prepared_db]) + monkeypatch.setattr(spack.store, 'db', db_for_test) + dependent = spack.spec.Spec('externaltest') + dependent.concretize() - new_dependency = dependent['externaltool'] - assert new_dependency.external - assert new_dependency.prefix == '/path/to/external_tool' + new_dependency = dependent['externaltool'] + assert new_dependency.external + assert new_dependency.prefix == '/path/to/external_tool' - dependent.package.do_install() + dependent.package.do_install() - assert not os.path.exists(new_dependency.prefix) - assert os.path.exists(dependent.prefix) - finally: - spack.store.db = original_db + assert not os.path.exists(new_dependency.prefix) + assert os.path.exists(dependent.prefix) def test_installed_upstream(tmpdir_factory, install_mockery, mock_fetch, - gen_mock_layout): + gen_mock_layout, monkeypatch): """Check that when a dependency package is recorded as installed in an upstream database that it is not reinstalled. """ @@ -229,26 +230,23 @@ def test_installed_upstream(tmpdir_factory, install_mockery, mock_fetch, dependency.concretize() prepared_db.add(dependency, upstream_layout) - try: - original_db = spack.store.db - downstream_db_root = str( - tmpdir_factory.mktemp('mock_downstream_db_root')) - spack.store.db = spack.database.Database( - downstream_db_root, upstream_dbs=[prepared_db]) - dependent = spack.spec.Spec('dependent-install') - dependent.concretize() + downstream_db_root = str( + tmpdir_factory.mktemp('mock_downstream_db_root')) + db_for_test = spack.database.Database( + downstream_db_root, upstream_dbs=[prepared_db]) + monkeypatch.setattr(spack.store, 'db', db_for_test) + dependent = spack.spec.Spec('dependent-install') + dependent.concretize() - new_dependency = dependent['dependency-install'] - assert new_dependency.package.installed_upstream - assert (new_dependency.prefix == - upstream_layout.path_for_spec(dependency)) + new_dependency = dependent['dependency-install'] + assert new_dependency.package.installed_upstream + assert (new_dependency.prefix == + upstream_layout.path_for_spec(dependency)) - dependent.package.do_install() + dependent.package.do_install() - assert not os.path.exists(new_dependency.prefix) - assert os.path.exists(dependent.prefix) - finally: - spack.store.db = original_db + assert not os.path.exists(new_dependency.prefix) + assert os.path.exists(dependent.prefix) @pytest.mark.disable_clean_stage_check diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py index 09ba78f58d..99f0af709c 100644 --- a/lib/spack/spack/test/installer.py +++ b/lib/spack/spack/test/installer.py @@ -626,7 +626,7 @@ def _add(_compilers): # Preclude any meaningful side-effects monkeypatch.setattr(spack.package.PackageBase, 'unit_test_check', _true) monkeypatch.setattr(inst.PackageInstaller, '_setup_install_dir', _noop) - monkeypatch.setattr(spack.build_environment, 'fork', _noop) + monkeypatch.setattr(spack.build_environment, 'start_build_process', _noop) 
diff --git a/lib/spack/spack/test/installer.py b/lib/spack/spack/test/installer.py
index 09ba78f58d..99f0af709c 100644
--- a/lib/spack/spack/test/installer.py
+++ b/lib/spack/spack/test/installer.py
@@ -626,7 +626,7 @@ def _add(_compilers):
     # Preclude any meaningful side-effects
     monkeypatch.setattr(spack.package.PackageBase, 'unit_test_check', _true)
     monkeypatch.setattr(inst.PackageInstaller, '_setup_install_dir', _noop)
-    monkeypatch.setattr(spack.build_environment, 'fork', _noop)
+    monkeypatch.setattr(spack.build_environment, 'start_build_process', _noop)
     monkeypatch.setattr(spack.database.Database, 'add', _noop)
     monkeypatch.setattr(spack.compilers, 'add_compilers_to_config', _add)

diff --git a/lib/spack/spack/test/llnl/util/lock.py b/lib/spack/spack/test/llnl/util/lock.py
index 596a801179..800c9d63ad 100644
--- a/lib/spack/spack/test/llnl/util/lock.py
+++ b/lib/spack/spack/test/llnl/util/lock.py
@@ -53,14 +53,13 @@
 import glob
 import getpass
 from contextlib import contextmanager
-from multiprocessing import Queue
+from multiprocessing import Process, Queue

 import pytest

 import llnl.util.lock as lk
 import llnl.util.multiproc as mp
 from llnl.util.filesystem import touch
-from llnl.util.lang import fork_context


 #
@@ -217,7 +216,7 @@ def local_multiproc_test(*functions, **kwargs):
     b = mp.Barrier(len(functions), timeout=barrier_timeout)

     args = (b,) + tuple(kwargs.get('extra_args', ()))
-    procs = [fork_context.Process(target=f, args=args, name=f.__name__)
+    procs = [Process(target=f, args=args, name=f.__name__)
              for f in functions]

     for p in procs:
@@ -278,42 +277,74 @@ def wait(self):
 #
 # Process snippets below can be composed into tests.
 #
-def acquire_write(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class AcquireWrite(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         lock.acquire_write()  # grab exclusive lock
         barrier.wait()
         barrier.wait()  # hold the lock until timeout in other procs.
-    return fn


-def acquire_read(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class AcquireRead(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         lock.acquire_read()  # grab shared lock
         barrier.wait()
         barrier.wait()  # hold the lock until timeout in other procs.
-    return fn
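These callable classes also mimic a function's __name__, because local_multiproc_test names each Process after its target; TimeoutWrite and TimeoutRead below follow the same shape. A standalone sketch of the pattern (toy body; the real classes take lock ranges and a barrier):

    from multiprocessing import Process


    class AcquireRead(object):
        """Callable, picklable test step."""
        def __init__(self, lock_path):
            self.lock_path = lock_path

        @property
        def __name__(self):
            # Process(name=f.__name__) keeps working when f is an
            # instance rather than a function.
            return self.__class__.__name__

        def __call__(self):
            print('acquiring read lock on', self.lock_path)


    if __name__ == '__main__':
        step = AcquireRead('/tmp/lockfile')
        p = Process(target=step, name=step.__name__)
        p.start()
        p.join()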


-def timeout_write(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class TimeoutWrite(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         barrier.wait()  # wait for lock acquire in first process
         with pytest.raises(lk.LockTimeoutError):
             lock.acquire_write(lock_fail_timeout)
         barrier.wait()
-    return fn


-def timeout_read(lock_path, start=0, length=0):
-    def fn(barrier):
-        lock = lk.Lock(lock_path, start, length)
+class TimeoutRead(object):
+    def __init__(self, lock_path, start=0, length=0):
+        self.lock_path = lock_path
+        self.start = start
+        self.length = length
+
+    @property
+    def __name__(self):
+        return self.__class__.__name__
+
+    def __call__(self, barrier):
+        lock = lk.Lock(self.lock_path, self.start, self.length)
         barrier.wait()  # wait for lock acquire in first process
         with pytest.raises(lk.LockTimeoutError):
             lock.acquire_read(lock_fail_timeout)
         barrier.wait()
-    return fn


 #
@@ -322,57 +353,57 @@ def fn(barrier):
 #
 def test_write_lock_timeout_on_write(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_ranges(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_write(lock_path, 0, 1))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 0, 1))


 def test_write_lock_timeout_on_write_ranges_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 64),
-        acquire_write(lock_path, 65, 1),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 63, 1))
+        AcquireWrite(lock_path, 0, 64),
+        AcquireWrite(lock_path, 65, 1),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 63, 1))


 def test_write_lock_timeout_on_write_ranges_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 1, 1),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 1, 1),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_write_ranges_4(lock_path):
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 1, 1),
-        acquire_write(lock_path, 2, 456),
-        acquire_write(lock_path, 500, 64),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 1, 1),
+        AcquireWrite(lock_path, 2, 456),
+        AcquireWrite(lock_path, 500, 64),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 #
@@ -381,46 +412,46 @@ def test_write_lock_timeout_on_write_ranges_4(lock_path):
 #
 def test_read_lock_timeout_on_write(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_2(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_3(lock_path):
     multiproc_test(
-        acquire_write(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_ranges(lock_path):
     """small write lock, read whole file."""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_read(lock_path))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutRead(lock_path))


 def test_read_lock_timeout_on_write_ranges_2(lock_path):
     """small write lock, small read lock"""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        timeout_read(lock_path, 0, 1))
+        AcquireWrite(lock_path, 0, 1),
+        TimeoutRead(lock_path, 0, 1))


 def test_read_lock_timeout_on_write_ranges_3(lock_path):
     """two write locks, overlapping read locks"""
     multiproc_test(
-        acquire_write(lock_path, 0, 1),
-        acquire_write(lock_path, 64, 128),
-        timeout_read(lock_path, 0, 1),
-        timeout_read(lock_path, 128, 256))
+        AcquireWrite(lock_path, 0, 1),
+        AcquireWrite(lock_path, 64, 128),
+        TimeoutRead(lock_path, 0, 1),
+        TimeoutRead(lock_path, 128, 256))


 #
@@ -428,58 +459,58 @@ def test_read_lock_timeout_on_write_ranges_3(lock_path):
 #
 def test_write_lock_timeout_on_read(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_3(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        timeout_write(lock_path))
+        AcquireRead(lock_path, 0, 1),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_on_read_ranges_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        timeout_write(lock_path, 0, 1))
+        AcquireRead(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 0, 1))


 def test_write_lock_timeout_on_read_ranges_3(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 1),
-        acquire_read(lock_path, 10, 1),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 10, 1))
+        AcquireRead(lock_path, 0, 1),
+        AcquireRead(lock_path, 10, 1),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 10, 1))


 def test_write_lock_timeout_on_read_ranges_4(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 64),
-        timeout_write(lock_path, 10, 1),
-        timeout_write(lock_path, 32, 1))
+        AcquireRead(lock_path, 0, 64),
+        TimeoutWrite(lock_path, 10, 1),
+        TimeoutWrite(lock_path, 32, 1))


 def test_write_lock_timeout_on_read_ranges_5(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 64, 128),
-        timeout_write(lock_path, 65, 1),
-        timeout_write(lock_path, 127, 1),
-        timeout_write(lock_path, 90, 10))
+        AcquireRead(lock_path, 64, 128),
+        TimeoutWrite(lock_path, 65, 1),
+        TimeoutWrite(lock_path, 127, 1),
+        TimeoutWrite(lock_path, 90, 10))
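The start/length arguments exercised above map down to POSIX byte-range locks, which is why two processes can hold exclusive locks on disjoint spans of the same file. A bare fcntl sketch of that property (POSIX only; the path is arbitrary, and this is not llnl.util.lock code):

    import fcntl
    import os

    fd = os.open('/tmp/lockfile', os.O_RDWR | os.O_CREAT, 0o600)

    # Exclusive, non-blocking lock on bytes [0, 64) of the file.
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 64, 0, os.SEEK_SET)

    # A second process could still lock, say, bytes [65, 66) without
    # blocking -- the disjoint-range behavior the *_ranges tests rely on.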


 #
@@ -487,67 +518,67 @@ def test_write_lock_timeout_on_read_ranges_5(lock_path):
 #
 def test_write_lock_timeout_with_multiple_readers_2_1(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_2_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_3_1(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_3_2(lock_path):
     multiproc_test(
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        acquire_read(lock_path),
-        timeout_write(lock_path),
-        timeout_write(lock_path))
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        AcquireRead(lock_path),
+        TimeoutWrite(lock_path),
+        TimeoutWrite(lock_path))


 def test_write_lock_timeout_with_multiple_readers_2_1_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 10),
-        acquire_read(lock_path, 0.5, 10),
-        timeout_write(lock_path, 5, 5))
+        AcquireRead(lock_path, 0, 10),
+        AcquireRead(lock_path, 0.5, 10),
+        TimeoutWrite(lock_path, 5, 5))


 def test_write_lock_timeout_with_multiple_readers_2_3_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 10),
-        acquire_read(lock_path, 5, 15),
-        timeout_write(lock_path, 0, 1),
-        timeout_write(lock_path, 11, 3),
-        timeout_write(lock_path, 7, 1))
+        AcquireRead(lock_path, 0, 10),
+        AcquireRead(lock_path, 5, 15),
+        TimeoutWrite(lock_path, 0, 1),
+        TimeoutWrite(lock_path, 11, 3),
+        TimeoutWrite(lock_path, 7, 1))


 def test_write_lock_timeout_with_multiple_readers_3_1_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 5),
-        acquire_read(lock_path, 5, 5),
-        acquire_read(lock_path, 10, 5),
-        timeout_write(lock_path, 0, 15))
+        AcquireRead(lock_path, 0, 5),
+        AcquireRead(lock_path, 5, 5),
+        AcquireRead(lock_path, 10, 5),
+        TimeoutWrite(lock_path, 0, 15))


 def test_write_lock_timeout_with_multiple_readers_3_2_ranges(lock_path):
     multiproc_test(
-        acquire_read(lock_path, 0, 5),
-        acquire_read(lock_path, 5, 5),
-        acquire_read(lock_path, 10, 5),
-        timeout_write(lock_path, 3, 10),
-        timeout_write(lock_path, 5, 1))
+        AcquireRead(lock_path, 0, 5),
+        AcquireRead(lock_path, 5, 5),
+        AcquireRead(lock_path, 10, 5),
+        TimeoutWrite(lock_path, 3, 10),
+        TimeoutWrite(lock_path, 5, 1))


 @pytest.mark.skipif(os.getuid() == 0, reason='user is root')
@@ -651,13 +682,12 @@ def test_upgrade_read_to_write_fails_with_readonly_file(private_lock_path):
         lock.acquire_write()


-#
-# Longer test case that ensures locks are reusable. Ordering is
-# enforced by barriers throughout -- steps are shown with numbers.
-#
-def test_complex_acquire_and_release_chain(lock_path):
-    def p1(barrier):
-        lock = lk.Lock(lock_path)
+class ComplexAcquireAndRelease(object):
+    def __init__(self, lock_path):
+        self.lock_path = lock_path
+
+    def p1(self, barrier):
+        lock = lk.Lock(self.lock_path)

         lock.acquire_write()
         barrier.wait()  # ---------------------------------------- 1
@@ -697,8 +727,8 @@ def p1(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    def p2(barrier):
-        lock = lk.Lock(lock_path)
+    def p2(self, barrier):
+        lock = lk.Lock(self.lock_path)

         # p1 acquires write
         barrier.wait()  # ---------------------------------------- 1
@@ -737,8 +767,8 @@ def p2(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    def p3(barrier):
-        lock = lk.Lock(lock_path)
+    def p3(self, barrier):
+        lock = lk.Lock(self.lock_path)

         # p1 acquires write
         barrier.wait()  # ---------------------------------------- 1
@@ -777,7 +807,16 @@ def p3(barrier):
         barrier.wait()  # ---------------------------------------- 13
         lock.release_read()

-    multiproc_test(p1, p2, p3)
+
+#
+# Longer test case that ensures locks are reusable. Ordering is
+# enforced by barriers throughout -- steps are shown with numbers.
+#
+def test_complex_acquire_and_release_chain(lock_path):
+    test_chain = ComplexAcquireAndRelease(lock_path)
+    multiproc_test(test_chain.p1,
+                   test_chain.p2,
+                   test_chain.p3)


 class AssertLock(lk.Lock):
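ComplexAcquireAndRelease works as a spawn-process target because Python 3 (3.5 and later) can pickle a bound method by reference to its instance and method name, whereas a function nested inside the test could not be pickled at all. A quick demonstration with a toy class (not the test code):

    import pickle


    class Steps(object):
        def __init__(self, lock_path):
            self.lock_path = lock_path

        def p1(self):
            return 'p1 would lock ' + self.lock_path


    steps = Steps('/tmp/lockfile')
    # The instance travels with the method, so its state survives the trip.
    clone = pickle.loads(pickle.dumps(steps.p1))
    print(clone())  # -> p1 would lock /tmp/lockfile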
@@ -1146,24 +1185,26 @@ def read():
     assert vals['read'] == 1


-def test_lock_debug_output(lock_path):
-    host = socket.getfqdn()
+class LockDebugOutput(object):
+    def __init__(self, lock_path):
+        self.lock_path = lock_path
+        self.host = socket.getfqdn()

-    def p1(barrier, q1, q2):
+    def p1(self, barrier, q1, q2):
         # exchange pids
         p1_pid = os.getpid()
         q1.put(p1_pid)
         p2_pid = q2.get()

         # set up lock
-        lock = lk.Lock(lock_path, debug=True)
+        lock = lk.Lock(self.lock_path, debug=True)

         with lk.WriteTransaction(lock):
             # p1 takes write lock and writes pid/host to file
             barrier.wait()  # ------------------------------------ 1

             assert lock.pid == p1_pid
-            assert lock.host == host
+            assert lock.host == self.host

             # wait for p2 to verify contents of file
             barrier.wait()  # ---------------------------------------- 2
@@ -1174,21 +1215,21 @@ def p1(barrier, q1, q2):
         # verify pid/host info again
         with lk.ReadTransaction(lock):
             assert lock.old_pid == p1_pid
-            assert lock.old_host == host
+            assert lock.old_host == self.host

             assert lock.pid == p2_pid
-            assert lock.host == host
+            assert lock.host == self.host

         barrier.wait()  # ---------------------------------------- 4

-    def p2(barrier, q1, q2):
+    def p2(self, barrier, q1, q2):
         # exchange pids
         p2_pid = os.getpid()
         p1_pid = q1.get()
         q2.put(p2_pid)

         # set up lock
-        lock = lk.Lock(lock_path, debug=True)
+        lock = lk.Lock(self.lock_path, debug=True)

         # p1 takes write lock and writes pid/host to file
         barrier.wait()  # ---------------------------------------- 1
@@ -1196,25 +1237,28 @@ def p2(barrier, q1, q2):
         # verify that p1 wrote information to lock file
         with lk.ReadTransaction(lock):
             assert lock.pid == p1_pid
-            assert lock.host == host
+            assert lock.host == self.host

         barrier.wait()  # ---------------------------------------- 2

         # take a write lock on the file and verify pid/host info
         with lk.WriteTransaction(lock):
             assert lock.old_pid == p1_pid
-            assert lock.old_host == host
+            assert lock.old_host == self.host

             assert lock.pid == p2_pid
-            assert lock.host == host
+            assert lock.host == self.host

             barrier.wait()  # ------------------------------------ 3

         # wait for p1 to verify pid/host info
         barrier.wait()  # ---------------------------------------- 4

+
+def test_lock_debug_output(lock_path):
+    test_debug = LockDebugOutput(lock_path)
     q1, q2 = Queue(), Queue()
-    local_multiproc_test(p2, p1, extra_args=(q1, q2))
+    local_multiproc_test(test_debug.p2, test_debug.p1, extra_args=(q1, q2))


 def test_lock_with_no_parent_directory(tmpdir):
diff --git a/lib/spack/spack/test/llnl/util/tty/log.py b/lib/spack/spack/test/llnl/util/tty/log.py
index c6cef00977..811b6c6642 100644
--- a/lib/spack/spack/test/llnl/util/tty/log.py
+++ b/lib/spack/spack/test/llnl/util/tty/log.py
@@ -396,6 +396,11 @@ def mock_shell_v_v_no_termios(proc, ctl, **kwargs):
 def test_foreground_background_output(
         test_fn, capfd, termios_on_or_off, tmpdir):
     """Tests hitting 'v' toggles output, and that force_echo works."""
+    if (sys.version_info >= (3, 8) and sys.platform == 'darwin'
+            and termios_on_or_off == no_termios):
+
+        return
+
     shell = PseudoShell(test_fn, synchronized_logger)
     log_path = str(tmpdir.join("log.txt"))

diff --git a/lib/spack/spack/test/sbang.py b/lib/spack/spack/test/sbang.py
index 18bbe9f345..ca7d2a6fe6 100644
--- a/lib/spack/spack/test/sbang.py
+++ b/lib/spack/spack/test/sbang.py
@@ -21,29 +21,36 @@
 from spack.util.executable import which


+too_long = sbang.shebang_limit + 1
+
+
 short_line = "#!/this/is/short/bin/bash\n"
-long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
+long_line = "#!/this/" + ('x' * too_long) + "/is/long\n"

-lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"
+lua_line = "#!/this/" + ('x' * too_long) + "/is/lua\n"
 lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
-lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
+lua_line_patched = "--!/this/" + ('x' * too_long) + "/is/lua\n"

-node_line = "#!/this/" + ('x' * 200) + "/is/node\n"
+node_line = "#!/this/" + ('x' * too_long) + "/is/node\n"
 node_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
-node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n"
+node_line_patched = "//!/this/" + ('x' * too_long) + "/is/node\n"

-php_line = "#!/this/" + ('x' * 200) + "/is/php\n"
+php_line = "#!/this/" + ('x' * too_long) + "/is/php\n"
 php_in_text = ("line\n") * 100 + "php\n" + ("line\n" * 100)
-php_line_patched = "<?php #!/this/" + ('x' * 200) + "/is/php\n"
+php_line_patched = "<?php #!/this/" + ('x' * too_long) + "/is/php\n"
 php_line_patched2 = "?>\n"

-sbang_line = '#!/bin/sh %s/bin/sbang\n' % spack.store.layout.root
 last_line = "last!\n"


+@pytest.fixture
+def sbang_line():
+    yield '#!/bin/sh %s/bin/sbang\n' % spack.store.layout.root
+
+
 class ScriptDirectory(object):
     """Directory full of test scripts to run sbang instrumentation on."""

-    def __init__(self):
+    def __init__(self, sbang_line):
         self.tempdir = tempfile.mkdtemp()
         self.directory = os.path.join(self.tempdir, 'dir')
@@ -117,13 +124,13 @@ def destroy(self):


 @pytest.fixture
-def script_dir():
-    sdir = ScriptDirectory()
+def script_dir(sbang_line):
+    sdir = ScriptDirectory(sbang_line)
     yield sdir
     sdir.destroy()


-def test_shebang_handling(script_dir):
+def test_shebang_handling(script_dir, sbang_line):
     assert sbang.shebang_too_long(script_dir.lua_shebang)
     assert sbang.shebang_too_long(script_dir.long_shebang)

@@ -169,13 +176,13 @@ def test_shebang_handling(script_dir):
         assert f.readline() == last_line


-def test_shebang_handles_non_writable_files(script_dir):
+def test_shebang_handles_non_writable_files(script_dir, sbang_line):
     # make a file non-writable
     st = os.stat(script_dir.long_shebang)
     not_writable_mode = st.st_mode & ~stat.S_IWRITE
     os.chmod(script_dir.long_shebang, not_writable_mode)

-    test_shebang_handling(script_dir)
+    test_shebang_handling(script_dir, sbang_line)

     st = os.stat(script_dir.long_shebang)
     assert oct(not_writable_mode) == oct(st.st_mode)
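Turning sbang_line from a module-level constant into a fixture matters because spack.store.layout.root is itself reconfigured by test fixtures; a value captured at import time would go stale. The general pattern, with a hypothetical current_root() standing in for that configurable global state:

    import pytest


    def current_root():
        # stand-in for configurable global state such as
        # spack.store.layout.root
        return '/tmp/spack-root'


    @pytest.fixture
    def sbang_line():
        # evaluated per test, after other fixtures have reconfigured the
        # global state it depends on
        yield '#!/bin/sh %s/bin/sbang\n' % current_root()


    def test_sbang_line_is_fresh(sbang_line):
        assert sbang_line.startswith('#!/bin/sh ')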
diff --git a/lib/spack/spack/util/pattern.py b/lib/spack/spack/util/pattern.py
index f467e2d18e..fcbb4e6baa 100644
--- a/lib/spack/spack/util/pattern.py
+++ b/lib/spack/spack/util/pattern.py
@@ -8,6 +8,27 @@
 import functools


+class Delegate(object):
+    def __init__(self, name, container):
+        self.name = name
+        self.container = container
+
+    def __call__(self, *args, **kwargs):
+        return [getattr(item, self.name)(*args, **kwargs)
+                for item in self.container]
+
+
+class Composite(list):
+    def __init__(self, fns_to_delegate):
+        self.fns_to_delegate = fns_to_delegate
+
+    def __getattr__(self, name):
+        if name != 'fns_to_delegate' and name in self.fns_to_delegate:
+            return Delegate(name, self)
+        else:
+            return self.__getattribute__(name)
+
+
 def composite(interface=None, method_list=None, container=list):
     """Decorator implementing the GoF composite pattern.

diff --git a/lib/spack/spack/util/prefix.py b/lib/spack/spack/util/prefix.py
index 804790c1d3..10ec50d30c 100644
--- a/lib/spack/spack/util/prefix.py
+++ b/lib/spack/spack/util/prefix.py
@@ -49,3 +49,9 @@ def join(self, string):
             Prefix: the newly created installation prefix
         """
         return Prefix(os.path.join(self, string))
+
+    def __getstate__(self):
+        return self.__dict__
+
+    def __setstate__(self, d):
+        self.__dict__.update(d)
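The Prefix change at the end is another pickling guard: pickle and copy probe instances for optional protocol methods (__getstate__, __setstate__, and friends) via getattr, and a str subclass whose __getattr__ answers every name with a joined path hands them a non-callable string. Defining the protocol methods explicitly keeps the pickle machinery on its normal path. A sketch of the idea; the dunder guard in __getattr__ is an extra safety in the spirit of the Singleton and Composite guards above, not part of the diff:

    import os
    import pickle


    class Prefix(str):
        """str subclass where attribute access joins paths."""
        def __getattr__(self, attr):
            if attr.startswith('__'):
                # let pickle/copy protocol probes fail normally
                raise AttributeError(attr)
            return Prefix(os.path.join(self, attr))

        def __getstate__(self):
            return self.__dict__

        def __setstate__(self, d):
            self.__dict__.update(d)


    p = Prefix('/usr/local')
    print(p.lib)                          # -> /usr/local/lib
    print(pickle.loads(pickle.dumps(p)))  # -> /usr/local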