commit 55662eca69
Merge branch 'develop' into mplegendre-multi_pkgsrc_roots

Conflicts:
	lib/spack/spack/__init__.py
	lib/spack/spack/directives.py
	lib/spack/spack/packages.py

93 changed files with 2989 additions and 284 deletions

README.md | 22
@@ -1,5 +1,5 @@
-Spack
-===========
+![image](share/spack/logo/spack-logo-text-64.png "Spack")
+============

 Spack is a package management tool designed to support multiple
 versions and configurations of software on a wide variety of platforms
@@ -13,7 +13,7 @@ can coexist on the same system.
 Most importantly, Spack is simple. It offers a simple spec syntax so
 that users can specify versions and configuration options
 concisely. Spack is also simple for package authors: package files are
-writtin in pure Python, and specs allow package authors to write a
+written in pure Python, and specs allow package authors to write a
 single build script for many different builds of the same package.

 See the
@@ -62,21 +62,9 @@ latest stable release.

 Authors
 ----------------
-Spack was written by Todd Gamblin, tgamblin@llnl.gov.
+Many thanks go to Spack's [contributors](https://github.com/scalability-llnl/spack/graphs/contributors).

-Significant contributions were also made by:
-
-  * David Beckingsale
-  * David Boehme
-  * Alfredo Gimenez
-  * Luc Jaulmes
-  * Matt Legendre
-  * Greg Lee
-  * Adam Moody
-  * Saravan Pantham
-  * Joachim Protze
-  * Bob Robey
-  * Justin Too
+Spack was originally written by Todd Gamblin, tgamblin@llnl.gov.

 Release
 ----------------
@@ -118,7 +118,7 @@ def main():

     # If the user asked for it, don't check ssl certs.
     if args.insecure:
-        tty.warn("You asked for --insecure, which does not check SSL certificates or checksums.")
+        tty.warn("You asked for --insecure, which does not check SSL certificates.")
         spack.curl.add_default_arg('-k')

     # Try to load the particular command asked for and run it
@@ -149,7 +149,7 @@
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = [('show_copyright', False)]
+html_theme_options = { 'logo_only' : True }

 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = ["_themes"]
@@ -163,12 +163,12 @@

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-#html_logo = None
+html_logo = '../../../share/spack/logo/spack-logo-white-text-48.png'

 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+html_favicon = '../../../share/spack/logo/favicon.ico'

 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -222,7 +222,7 @@ def working_dir(dirname, **kwargs):

 def touch(path):
     """Creates an empty file at the specified path."""
-    with closing(open(path, 'a')) as file:
+    with open(path, 'a') as file:
         os.utime(path, None)

@@ -88,10 +88,7 @@ def index_by(objects, *funcs):
     result = {}
     for o in objects:
         key = f(o)
-        if key not in result:
-            result[key] = [o]
-        else:
-            result[key].append(o)
+        result.setdefault(key, []).append(o)

     for key, objects in result.items():
         result[key] = index_by(objects, *funcs[1:])
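The setdefault rewrite above collapses the four-line branch into one call with the same grouping behavior. A minimal, self-contained sketch of the idiom (not Spack code):

    # Group words by length; setdefault inserts the empty list only on first sight.
    result = {}
    for w in ['a', 'bb', 'cc', 'ddd']:
        result.setdefault(len(w), []).append(w)
    assert result == {1: ['a'], 2: ['bb', 'cc'], 3: ['ddd']}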
lib/spack/llnl/util/lock.py | 175 (new file)
@@ -0,0 +1,175 @@
##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import fcntl
import errno
import time
import socket

# Default timeout in seconds, after which locks will raise exceptions.
_default_timeout = 60

# Sleep time per iteration in spin loop (in seconds)
_sleep_time = 1e-5


class Lock(object):
    def __init__(self, file_path):
        self._file_path = file_path
        self._fd = None
        self._reads = 0
        self._writes = 0


    def _lock(self, op, timeout):
        """This takes a lock using POSIX locks (``fcntl.lockf``).

        The lock is implemented as a spin lock using a nonblocking
        call to lockf().

        On acquiring an exclusive lock, the lock writes this process's
        pid and host to the lock file, in case the holding process
        needs to be killed later.

        If the lock times out, it raises a ``LockError``.
        """
        start_time = time.time()
        while (time.time() - start_time) < timeout:
            try:
                if self._fd is None:
                    self._fd = os.open(self._file_path, os.O_RDWR)

                fcntl.lockf(self._fd, op | fcntl.LOCK_NB)
                if op == fcntl.LOCK_EX:
                    os.write(self._fd, "pid=%s,host=%s" % (os.getpid(), socket.getfqdn()))
                return

            except IOError as error:
                if error.errno == errno.EAGAIN or error.errno == errno.EACCES:
                    pass
                else:
                    raise
            time.sleep(_sleep_time)

        raise LockError("Timed out waiting for lock.")


    def _unlock(self):
        """Releases a lock using POSIX locks (``fcntl.lockf``)

        Releases the lock regardless of mode. Note that read locks may
        be masquerading as write locks, but this removes either.

        """
        fcntl.lockf(self._fd, fcntl.LOCK_UN)
        os.close(self._fd)
        self._fd = None


    def acquire_read(self, timeout=_default_timeout):
        """Acquires a recursive, shared lock for reading.

        Read and write locks can be acquired and released in arbitrary
        order, but the POSIX lock is held until all local read and
        write locks are released.

        Returns True if it is the first acquire and actually acquires
        the POSIX lock, False if it is a nested transaction.

        """
        if self._reads == 0 and self._writes == 0:
            self._lock(fcntl.LOCK_SH, timeout)   # can raise LockError.
            self._reads += 1
            return True
        else:
            self._reads += 1
            return False


    def acquire_write(self, timeout=_default_timeout):
        """Acquires a recursive, exclusive lock for writing.

        Read and write locks can be acquired and released in arbitrary
        order, but the POSIX lock is held until all local read and
        write locks are released.

        Returns True if it is the first acquire and actually acquires
        the POSIX lock, False if it is a nested transaction.

        """
        if self._writes == 0:
            self._lock(fcntl.LOCK_EX, timeout)   # can raise LockError.
            self._writes += 1
            return True
        else:
            self._writes += 1
            return False


    def release_read(self):
        """Releases a read lock.

        Returns True if the last recursive lock was released, False if
        there are still outstanding locks.

        Does limited correctness checking: if a read lock is released
        when none are held, this will raise an assertion error.

        """
        assert self._reads > 0

        if self._reads == 1 and self._writes == 0:
            self._unlock()   # can raise LockError.
            self._reads -= 1
            return True
        else:
            self._reads -= 1
            return False


    def release_write(self):
        """Releases a write lock.

        Returns True if the last recursive lock was released, False if
        there are still outstanding locks.

        Does limited correctness checking: if a write lock is released
        when none are held, this will raise an assertion error.

        """
        assert self._writes > 0

        if self._writes == 1 and self._reads == 0:
            self._unlock()   # can raise LockError.
            self._writes -= 1
            return True
        else:
            self._writes -= 1
            return False


class LockError(Exception):
    """Raised when an attempt to acquire a lock times out."""
    pass
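A sketch of the intended usage (hypothetical path; note the lock file must already exist, since _lock() opens it with os.O_RDWR and no O_CREAT):

    lock = Lock('/var/tmp/example.lock')   # hypothetical, pre-created file

    assert lock.acquire_read() is True     # first acquire takes the POSIX lock
    assert lock.acquire_read() is False    # nested acquire only bumps a counter
    assert lock.release_read() is False    # one read lock still outstanding
    assert lock.release_read() is True     # last release drops the POSIX lock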
@@ -33,8 +33,7 @@
 from StringIO import StringIO

 from llnl.util.tty import terminal_size
-from llnl.util.tty.color import clen
-
+from llnl.util.tty.color import clen, cextra

 class ColumnConfig:
     def __init__(self, cols):
@@ -42,7 +41,6 @@ def __init__(self, cols):
         self.line_length = 0
         self.valid = True
         self.widths = [0] * cols   # does not include ansi colors
-        self.cwidths = [0] * cols  # includes ansi colors

     def __repr__(self):
         attrs = [(a, getattr(self, a)) for a in dir(self) if not a.startswith("__")]
@@ -66,8 +64,6 @@ def config_variable_cols(elts, console_width, padding, cols=0):
     # Get a bound on the most columns we could possibly have.
     # 'clen' ignores length of ansi color sequences.
     lengths = [clen(e) for e in elts]
-    clengths = [len(e) for e in elts]
-
     max_cols = max(1, console_width / (min(lengths) + padding))
     max_cols = min(len(elts), max_cols)

@@ -85,7 +81,6 @@ def config_variable_cols(elts, console_width, padding, cols=0):
             if conf.widths[col] < (length + p):
                 conf.line_length += length + p - conf.widths[col]
                 conf.widths[col] = length + p
-                conf.cwidths[col] = clengths[i] + p
             conf.valid = (conf.line_length < console_width)

     try:
@@ -118,7 +113,6 @@ def config_uniform_cols(elts, console_width, padding, cols=0):

     config = ColumnConfig(cols)
     config.widths = [max_len] * cols
-    config.cwidths = [max_clen] * cols

     return config

@@ -147,9 +141,6 @@ def colify(elts, **options):
        method=<string>  Method to use to fit columns. Options are variable or uniform.
                         Variable-width columns are tighter, uniform columns are all the
                         same width and fit less data on the screen.
-
-       len=<func>       Function to use for calculating string length.
-                        Useful for ignoring ansi color. Default is 'len'.
     """
     # Get keyword arguments or set defaults
     cols = options.pop("cols", 0)
@@ -199,9 +190,6 @@ def colify(elts, **options):
         raise ValueError("method must be one of: " + allowed_methods)

     cols = config.cols
-    formats = ["%%-%ds" % width for width in config.cwidths[:-1]]
-    formats.append("%s")  # last column has no trailing space
-
     rows = (len(elts) + cols - 1) / cols
     rows_last_col = len(elts) % rows

@@ -209,7 +197,9 @@ def colify(elts, **options):
         output.write(" " * indent)
         for col in xrange(cols):
             elt = col * rows + row
-            output.write(formats[col] % elts[elt])
+            width = config.widths[col] + cextra(elts[elt])
+            fmt = '%%-%ds' % width
+            output.write(fmt % elts[elt])

         output.write("\n")
         row += 1
@@ -158,6 +158,11 @@ def clen(string):
     return len(re.sub(r'\033[^m]*m', '', string))


+def cextra(string):
+    """Length of extra color characters in a string"""
+    return len(''.join(re.findall(r'\033[^m]*m', string)))
+
+
 def cwrite(string, stream=sys.stdout, color=None):
     """Replace all color expressions in string with ANSI control
        codes and write the result to the stream. If color is
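For a string with embedded ANSI escapes, clen and cextra split its length into visible and invisible parts, which is what lets colify pad colored cells correctly. An illustrative check:

    s = '\033[1;31mred\033[0m'
    clen(s)     # 3  -- visible characters only
    cextra(s)   # 11 -- the two ANSI escape sequences
    assert len(s) == clen(s) + cextra(s)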
@@ -29,25 +29,27 @@
 import llnl.util.tty as tty

 # This lives in $prefix/lib/spack/spack/__file__
-prefix = ancestor(__file__, 4)
+spack_root = ancestor(__file__, 4)

 # The spack script itself
-spack_file = join_path(prefix, "bin", "spack")
+spack_file = join_path(spack_root, "bin", "spack")

 # spack directory hierarchy
-etc_path       = join_path(prefix, "etc")
-lib_path       = join_path(prefix, "lib", "spack")
+lib_path       = join_path(spack_root, "lib", "spack")
 build_env_path = join_path(lib_path, "env")
 module_path    = join_path(lib_path, "spack")
 compilers_path = join_path(module_path, "compilers")
 test_path      = join_path(module_path, "test")
 hooks_path     = join_path(module_path, "hooks")
-var_path       = join_path(prefix, "var", "spack")
+var_path       = join_path(spack_root, "var", "spack")
 stage_path     = join_path(var_path, "stage")
 packages_path  = join_path(var_path, "packages")
+share_path     = join_path(spack_root, "share", "spack")
+
+prefix = spack_root
 opt_path       = join_path(prefix, "opt")
 install_path   = join_path(opt_path, "spack")
-share_path     = join_path(prefix, "share", "spack")
+etc_path       = join_path(prefix, "etc")

 #
 # Setup the spack.repos namespace
@@ -65,6 +67,12 @@
 db = spack.packages.PackageFinder(*_repo_paths)
 sys.meta_path.append(db)

+#
+# Set up the installed packages database
+#
+from spack.database import Database
+installed_db = Database(install_path)
+
 #
 # Paths to mock files for testing.
 #
@@ -23,13 +23,13 @@
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
-import platform as py_platform
+import re
+import platform

 from llnl.util.lang import memoized

 import spack
 import spack.error as serr
 from spack.version import Version


 class InvalidSysTypeError(serr.SpackError):
@@ -59,14 +59,11 @@ def get_sys_type_from_environment():
     return os.environ.get('SYS_TYPE')


-def get_mac_sys_type():
-    """Return a Mac OS SYS_TYPE or None if this isn't a mac."""
-    mac_ver = py_platform.mac_ver()[0]
-    if not mac_ver:
-        return None
-
-    return "macosx_%s_%s" % (
-        Version(mac_ver).up_to(2), py_platform.machine())
+def get_sys_type_from_platform():
+    """Return the architecture from Python's platform module."""
+    sys_type = platform.system() + '-' + platform.machine()
+    sys_type = re.sub(r'[^\w-]', '_', sys_type)
+    return sys_type.lower()


 @memoized
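On a typical Linux x86_64 machine, for example, the new function produces:

    platform.system()              # 'Linux'
    platform.machine()             # 'x86_64'
    get_sys_type_from_platform()   # 'linux-x86_64'

The re.sub only replaces characters outside [A-Za-z0-9_-], so the hyphen separator survives and only unusual platform strings get sanitized.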
@@ -74,7 +71,7 @@ def sys_type():
     """Returns a SysType for the current machine."""
     methods = [get_sys_type_from_spack_globals,
                get_sys_type_from_environment,
-               get_mac_sys_type]
+               get_sys_type_from_platform]

     # search for a method that doesn't return None
     sys_type = None
@@ -317,4 +317,9 @@ def child_fun():
     # message. Just make the parent exit with an error code.
     pid, returncode = os.waitpid(pid, 0)
     if returncode != 0:
-        sys.exit(1)
+        raise InstallError("Installation process had nonzero exit code {0}."
+                           .format(str(returncode)))
+
+
+class InstallError(spack.error.SpackError):
+    """Raised when a package fails to install"""
@@ -124,7 +124,7 @@ def elide_list(line_list, max_num=10):


 def disambiguate_spec(spec):
-    matching_specs = spack.db.get_installed(spec)
+    matching_specs = spack.installed_db.query(spec)
     if not matching_specs:
         tty.die("Spec '%s' matches no installed packages." % spec)

@@ -54,7 +54,7 @@ def deactivate(parser, args):
     if args.all:
         if pkg.extendable:
             tty.msg("Deactivating all extensions of %s" % pkg.spec.short_spec)
-            ext_pkgs = spack.db.installed_extensions_for(spec)
+            ext_pkgs = spack.installed_db.installed_extensions_for(spec)

             for ext_pkg in ext_pkgs:
                 ext_pkg.spec.normalize()
@@ -58,36 +58,38 @@ def diy(self, args):
     if len(specs) > 1:
         tty.die("spack diy only takes one spec.")

-    spec = specs[0]
-    if not spack.db.exists(spec.name):
-        tty.warn("No such package: %s" % spec.name)
-        create = tty.get_yes_or_no("Create this package?", default=False)
-        if not create:
-            tty.msg("Exiting without creating.")
-            sys.exit(1)
-        else:
-            tty.msg("Running 'spack edit -f %s'" % spec.name)
-            edit_package(spec.name, True)
-            return
-
-    if not spec.version.concrete:
-        tty.die("spack diy spec must have a single, concrete version.")
-
-    spec.concretize()
-    package = spack.db.get(spec)
-
-    if package.installed:
-        tty.error("Already installed in %s" % package.prefix)
-        tty.msg("Uninstall or try adding a version suffix for this DIY build.")
-        sys.exit(1)
-
-    # Forces the build to run out of the current directory.
-    package.stage = DIYStage(os.getcwd())
-
-    # TODO: make this an argument, not a global.
-    spack.do_checksum = False
-
-    package.do_install(
-        keep_prefix=args.keep_prefix,
-        ignore_deps=args.ignore_deps,
-        keep_stage=True)   # don't remove source dir for DIY.
+    # Take a write lock before checking for existence.
+    with spack.installed_db.write_transaction():
+        spec = specs[0]
+        if not spack.db.exists(spec.name):
+            tty.warn("No such package: %s" % spec.name)
+            create = tty.get_yes_or_no("Create this package?", default=False)
+            if not create:
+                tty.msg("Exiting without creating.")
+                sys.exit(1)
+            else:
+                tty.msg("Running 'spack edit -f %s'" % spec.name)
+                edit_package(spec.name, True)
+                return
+
+        if not spec.version.concrete:
+            tty.die("spack diy spec must have a single, concrete version.")
+
+        spec.concretize()
+        package = spack.db.get(spec)
+
+        if package.installed:
+            tty.error("Already installed in %s" % package.prefix)
+            tty.msg("Uninstall or try adding a version suffix for this DIY build.")
+            sys.exit(1)
+
+        # Forces the build to run out of the current directory.
+        package.stage = DIYStage(os.getcwd())
+
+        # TODO: make this an argument, not a global.
+        spack.do_checksum = False
+
+        package.do_install(
+            keep_prefix=args.keep_prefix,
+            ignore_deps=args.ignore_deps,
+            keep_stage=True)   # don't remove source dir for DIY.
@@ -80,7 +80,7 @@ def extensions(parser, args):
     colify(ext.name for ext in extensions)

     # List specs of installed extensions.
-    installed = [s.spec for s in spack.db.installed_extensions_for(spec)]
+    installed = [s.spec for s in spack.installed_db.installed_extensions_for(spec)]
     print
     if not installed:
         tty.msg("None installed.")
@@ -54,6 +54,16 @@ def setup_parser(subparser):
         '-L', '--very-long', action='store_true', dest='very_long',
         help='Show dependency hashes as well as versions.')

+    subparser.add_argument(
+        '-u', '--unknown', action='store_true', dest='unknown',
+        help='Show only specs Spack does not have a package for.')
+    subparser.add_argument(
+        '-m', '--missing', action='store_true', dest='missing',
+        help='Show missing dependencies as well as installed specs.')
+    subparser.add_argument(
+        '-M', '--only-missing', action='store_true', dest='only_missing',
+        help='Show only missing dependencies.')
+
     subparser.add_argument(
         'query_specs', nargs=argparse.REMAINDER,
         help='optional specs to filter results')
@@ -113,6 +123,7 @@ def fmt(s):
         if hashes:
+            string += gray_hash(s, hlen) + ' '
         string += s.format('$-_$@$+', color=True)

         return string
     colify(fmt(s) for s in specs)
@@ -136,11 +147,21 @@ def find(parser, args):
     if not query_specs:
         return

+    # Set up query arguments.
+    installed, known = True, any
+    if args.only_missing:
+        installed = False
+    elif args.missing:
+        installed = any
+    if args.unknown:
+        known = False
+    q_args = { 'installed' : installed, 'known' : known }
+
     # Get all the specs the user asked for
     if not query_specs:
-        specs = set(spack.db.installed_package_specs())
+        specs = set(spack.installed_db.query(**q_args))
     else:
-        results = [set(spack.db.get_installed(qs)) for qs in query_specs]
+        results = [set(spack.installed_db.query(qs, **q_args)) for qs in query_specs]
         specs = set.union(*results)

     if not args.mode:
@@ -65,11 +65,21 @@ def print_text_info(pkg):
         print "None"
     else:
-        pad = padder(pkg.variants, 4)
+        maxv = max(len(v) for v in sorted(pkg.variants))
+        fmt = "%%-%ss%%-10s%%s" % (maxv + 4)
+
+        print "    " + fmt % ('Name', 'Default', 'Description')
+        print
         for name in sorted(pkg.variants):
             v = pkg.variants[name]
-            print "    %s%s" % (
-                pad(('+' if v.default else '-') + name + ':'),
-                "\n".join(textwrap.wrap(v.description)))
+            default = 'on' if v.default else 'off'
+
+            lines = textwrap.wrap(v.description)
+            lines[1:] = ["      " + (" " * maxv) + l for l in lines[1:]]
+            desc = "\n".join(lines)
+
+            print "    " + fmt % (name, default, desc)

     print
     print "Dependencies:"
@@ -71,10 +71,11 @@ def install(parser, args):
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
         package = spack.db.get(spec)
-        package.do_install(
-            keep_prefix=args.keep_prefix,
-            keep_stage=args.keep_stage,
-            ignore_deps=args.ignore_deps,
-            make_jobs=args.jobs,
-            verbose=args.verbose,
-            fake=args.fake)
+        with spack.installed_db.write_transaction():
+            package.do_install(
+                keep_prefix=args.keep_prefix,
+                keep_stage=args.keep_stage,
+                ignore_deps=args.ignore_deps,
+                make_jobs=args.jobs,
+                verbose=args.verbose,
+                fake=args.fake)
@@ -65,7 +65,7 @@ def module_find(mtype, spec_array):
         tty.die("You can only pass one spec.")
     spec = specs[0]

-    specs = [s for s in spack.db.installed_package_specs() if s.satisfies(spec)]
+    specs = spack.installed_db.query(spec)
     if len(specs) == 0:
         tty.die("No installed packages match spec %s" % spec)
@@ -86,7 +86,7 @@ def module_find(mtype, spec_array):
 def module_refresh():
     """Regenerate all module files for installed packages known to
        spack (some packages may no longer exist)."""
-    specs = [s for s in spack.db.installed_known_package_specs()]
+    specs = [s for s in spack.installed_db.query(installed=True, known=True)]

     for name, cls in module_types.items():
         tty.msg("Regenerating %s module files." % name)
lib/spack/spack/cmd/reindex.py | 31 (new file)
@@ -0,0 +1,31 @@
##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from external import argparse
import spack

description = "Rebuild Spack's package database."

def reindex(parser, args):
    spack.installed_db.reindex(spack.install_layout)
lib/spack/spack/cmd/test-install.py | 211 (new file)
@@ -0,0 +1,211 @@
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from external import argparse
import xml.etree.ElementTree as ET
import itertools
import re
import os
import codecs

import llnl.util.tty as tty
from llnl.util.filesystem import *

import spack
from spack.build_environment import InstallError
from spack.fetch_strategy import FetchError
import spack.cmd

description = "Treat package installations as unit tests and output formatted test results"

def setup_parser(subparser):
    subparser.add_argument(
        '-j', '--jobs', action='store', type=int,
        help="Explicitly set number of make jobs. Default is #cpus.")

    subparser.add_argument(
        '-n', '--no-checksum', action='store_true', dest='no_checksum',
        help="Do not check packages against checksum")

    subparser.add_argument(
        '-o', '--output', action='store', help="test output goes in this file")

    subparser.add_argument(
        'package', nargs=argparse.REMAINDER, help="spec of package to install")


class JunitResultFormat(object):
    def __init__(self):
        self.root = ET.Element('testsuite')
        self.tests = []

    def add_test(self, buildId, testResult, buildInfo=None):
        self.tests.append((buildId, testResult, buildInfo))

    def write_to(self, stream):
        self.root.set('tests', '{0}'.format(len(self.tests)))
        for buildId, testResult, buildInfo in self.tests:
            testcase = ET.SubElement(self.root, 'testcase')
            testcase.set('classname', buildId.name)
            testcase.set('name', buildId.stringId())
            if testResult == TestResult.FAILED:
                failure = ET.SubElement(testcase, 'failure')
                failure.set('type', "Build Error")
                failure.text = buildInfo
            elif testResult == TestResult.SKIPPED:
                skipped = ET.SubElement(testcase, 'skipped')
                skipped.set('type', "Skipped Build")
                skipped.text = buildInfo
        ET.ElementTree(self.root).write(stream)


class TestResult(object):
    PASSED = 0
    FAILED = 1
    SKIPPED = 2


class BuildId(object):
    def __init__(self, spec):
        self.name = spec.name
        self.version = spec.version
        self.hashId = spec.dag_hash()

    def stringId(self):
        return "-".join(str(x) for x in (self.name, self.version, self.hashId))

    def __hash__(self):
        return hash((self.name, self.version, self.hashId))

    def __eq__(self, other):
        if not isinstance(other, BuildId):
            return False

        return ((self.name, self.version, self.hashId) ==
                (other.name, other.version, other.hashId))


def fetch_log(path):
    if not os.path.exists(path):
        return list()
    with codecs.open(path, 'rb', 'utf-8') as F:
        return list(line.strip() for line in F.readlines())


def failed_dependencies(spec):
    return set(childSpec for childSpec in spec.dependencies.itervalues() if not
               spack.db.get(childSpec).installed)


def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log):
    # Post-order traversal is not strictly required but it makes sense to output
    # tests for dependencies first.
    for spec in topSpec.traverse(order='post'):
        if spec not in newInstalls:
            continue

        failedDeps = failed_dependencies(spec)
        package = spack.db.get(spec)
        if failedDeps:
            result = TestResult.SKIPPED
            dep = iter(failedDeps).next()
            depBID = BuildId(dep)
            errOutput = "Skipped due to failed dependency: {0}".format(
                depBID.stringId())
        elif (not package.installed) and (not package.stage.source_path):
            result = TestResult.FAILED
            errOutput = "Failure to fetch package resources."
        elif not package.installed:
            result = TestResult.FAILED
            lines = getLogFunc(package.build_log_path)
            errMessages = list(line for line in lines if
                               re.search('error:', line, re.IGNORECASE))
            errOutput = errMessages if errMessages else lines[-10:]
            errOutput = '\n'.join(itertools.chain(
                [spec.to_yaml(), "Errors:"], errOutput,
                ["Build Log:", package.build_log_path]))
        else:
            result = TestResult.PASSED
            errOutput = None

        bId = BuildId(spec)
        output.add_test(bId, result, errOutput)


def test_install(parser, args):
    if not args.package:
        tty.die("install requires a package argument")

    if args.jobs is not None:
        if args.jobs <= 0:
            tty.die("The -j option must be a positive integer!")

    if args.no_checksum:
        spack.do_checksum = False    # TODO: remove this global.

    specs = spack.cmd.parse_specs(args.package, concretize=True)
    if len(specs) > 1:
        tty.die("Only 1 top-level package can be specified")
    topSpec = iter(specs).next()

    newInstalls = set()
    for spec in topSpec.traverse():
        package = spack.db.get(spec)
        if not package.installed:
            newInstalls.add(spec)

    if not args.output:
        bId = BuildId(topSpec)
        outputDir = join_path(os.getcwd(), "test-output")
        if not os.path.exists(outputDir):
            os.mkdir(outputDir)
        outputFpath = join_path(outputDir, "test-{0}.xml".format(bId.stringId()))
    else:
        outputFpath = args.output

    for spec in topSpec.traverse(order='post'):
        # Calling do_install for the top-level package would be sufficient but
        # this attempts to keep going if any package fails (other packages which
        # are not dependents may succeed)
        package = spack.db.get(spec)
        if (not failed_dependencies(spec)) and (not package.installed):
            try:
                package.do_install(
                    keep_prefix=False,
                    keep_stage=True,
                    ignore_deps=False,
                    make_jobs=args.jobs,
                    verbose=True,
                    fake=False)
            except InstallError:
                pass
            except FetchError:
                pass

    jrf = JunitResultFormat()
    handled = {}
    create_test_output(topSpec, newInstalls, jrf)

    with open(outputFpath, 'wb') as F:
        jrf.write_to(F)
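A sketch of the result-format classes in isolation. The FakeSpec stand-in below is hypothetical; inside the command, BuildId is constructed from a real concrete Spec:

    class FakeSpec(object):                  # hypothetical stand-in for a Spec
        name = 'libelf'
        version = '0.8.13'
        def dag_hash(self): return 'abc123'

    jrf = JunitResultFormat()
    jrf.add_test(BuildId(FakeSpec()), TestResult.PASSED)
    jrf.add_test(BuildId(FakeSpec()), TestResult.FAILED, "error: compile failed")
    with open('test-output.xml', 'wb') as f:
        jrf.write_to(f)   # emits a <testsuite> element with one <testcase> per build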
@@ -53,51 +53,52 @@ def uninstall(parser, args):
     if not args.packages:
         tty.die("uninstall requires at least one package argument.")

-    specs = spack.cmd.parse_specs(args.packages)
-
-    # For each spec provided, make sure it refers to only one package.
-    # Fail and ask user to be unambiguous if it doesn't
-    pkgs = []
-    for spec in specs:
-        matching_specs = spack.db.get_installed(spec)
-        if not args.all and len(matching_specs) > 1:
-            tty.error("%s matches multiple packages:" % spec)
-            print
-            display_specs(matching_specs, long=True)
-            print
-            print "You can either:"
-            print "  a) Use a more specific spec, or"
-            print "  b) use spack uninstall -a to uninstall ALL matching specs."
-            sys.exit(1)
-
-        if len(matching_specs) == 0:
-            if args.force: continue
-            tty.die("%s does not match any installed packages." % spec)
-
-        for s in matching_specs:
-            try:
-                # should work if package is known to spack
-                pkgs.append(s.package)
-
-            except spack.packages.UnknownPackageError, e:
-                # The package.py file has gone away -- but still want to uninstall.
-                spack.Package(s).do_uninstall(force=True)
-
-    # Sort packages to be uninstalled by the number of installed dependents
-    # This ensures we do things in the right order
-    def num_installed_deps(pkg):
-        return len(pkg.installed_dependents)
-    pkgs.sort(key=num_installed_deps)
-
-    # Uninstall packages in order now.
-    for pkg in pkgs:
-        try:
-            pkg.do_uninstall(force=args.force)
-        except PackageStillNeededError, e:
-            tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
-            print
-            print "The following packages depend on it:"
-            display_specs(e.dependents, long=True)
-            print
-            print "You can use spack uninstall -f to force this action."
-            sys.exit(1)
+    with spack.installed_db.write_transaction():
+        specs = spack.cmd.parse_specs(args.packages)
+
+        # For each spec provided, make sure it refers to only one package.
+        # Fail and ask user to be unambiguous if it doesn't
+        pkgs = []
+        for spec in specs:
+            matching_specs = spack.installed_db.query(spec)
+            if not args.all and len(matching_specs) > 1:
+                tty.error("%s matches multiple packages:" % spec)
+                print
+                display_specs(matching_specs, long=True)
+                print
+                print "You can either:"
+                print "  a) Use a more specific spec, or"
+                print "  b) use spack uninstall -a to uninstall ALL matching specs."
+                sys.exit(1)
+
+            if len(matching_specs) == 0:
+                if args.force: continue
+                tty.die("%s does not match any installed packages." % spec)
+
+            for s in matching_specs:
+                try:
+                    # should work if package is known to spack
+                    pkgs.append(s.package)
+
+                except spack.packages.UnknownPackageError, e:
+                    # The package.py file has gone away -- but still want to uninstall.
+                    spack.Package(s).do_uninstall(force=True)
+
+        # Sort packages to be uninstalled by the number of installed dependents
+        # This ensures we do things in the right order
+        def num_installed_deps(pkg):
+            return len(pkg.installed_dependents)
+        pkgs.sort(key=num_installed_deps)
+
+        # Uninstall packages in order now.
+        for pkg in pkgs:
+            try:
+                pkg.do_uninstall(force=args.force)
+            except PackageStillNeededError, e:
+                tty.error("Will not uninstall %s" % e.spec.format("$_$@$%@$#", color=True))
+                print
+                print "The following packages depend on it:"
+                display_specs(e.dependents, long=True)
+                print
+                print "You can use spack uninstall -f to force this action."
+                sys.exit(1)
@@ -227,14 +227,32 @@ def find(cls, *path):
         for d in dicts:
             all_keys.update(d)

-        compilers = []
+        compilers = {}
         for k in all_keys:
             ver, pre, suf = k

             # Skip compilers with unknown version.
             if ver == 'unknown':
                 continue

             paths = tuple(pn[k] if k in pn else None for pn in dicts)
             spec = spack.spec.CompilerSpec(cls.name, ver)
-            compilers.append(cls(spec, *paths))

-        return compilers
+            if ver in compilers:
+                prev = compilers[ver]
+
+                # prefer the one with more compilers.
+                prev_paths = [prev.cc, prev.cxx, prev.f77, prev.fc]
+                newcount = len([p for p in paths if p is not None])
+                prevcount = len([p for p in prev_paths if p is not None])
+
+                # Don't add if it's not an improvement over prev compiler.
+                if newcount <= prevcount:
+                    continue
+
+            compilers[ver] = cls(spec, *paths)
+
+        return list(compilers.values())


     def __repr__(self):
lib/spack/spack/database.py | 628 (new file)
@@ -0,0 +1,628 @@
##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Spack's installation tracking database.

The database serves two purposes:

  1. It implements a cache on top of a potentially very large Spack
     directory hierarchy, speeding up many operations that would
     otherwise require filesystem access.

  2. It will allow us to track external installations as well as lost
     packages and their dependencies.

Prior to the implementation of this store, a directory layout served
as the authoritative database of packages in Spack. This module
provides a cache and a sanity checking mechanism for what is in the
filesystem.

"""
import os
import time
import socket

from external import yaml
from external.yaml.error import MarkedYAMLError, YAMLError

import llnl.util.tty as tty
from llnl.util.filesystem import *
from llnl.util.lock import *

import spack.spec
from spack.version import Version
from spack.spec import Spec
from spack.error import SpackError

# DB goes in this directory underneath the root
_db_dirname = '.spack-db'

# DB version. This is stuck in the DB file to track changes in format.
_db_version = Version('0.9')

# Default timeout for spack database locks is 60 seconds.
_db_lock_timeout = 60


def _autospec(function):
    """Decorator that automatically converts the argument of a single-arg
       function to a Spec."""
    def converter(self, spec_like, *args, **kwargs):
        if not isinstance(spec_like, spack.spec.Spec):
            spec_like = spack.spec.Spec(spec_like)
        return function(self, spec_like, *args, **kwargs)
    return converter
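Methods tagged with this decorator (``add``, ``remove``, and friends, defined below) therefore accept either a Spec or a plain spec string. A sketch with illustrative values, where db is a Database instance:

    # Because of @_autospec, these two calls are equivalent:
    db.add('libelf@0.8.13', '/opt/spack/linux-x86_64/gcc-4.8/libelf-0.8.13')
    db.add(spack.spec.Spec('libelf@0.8.13'),
           '/opt/spack/linux-x86_64/gcc-4.8/libelf-0.8.13')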
class InstallRecord(object):
    """A record represents one installation in the DB.

    The record keeps track of the spec for the installation, its
    install path, AND whether or not it is installed. We need the
    installed flag in case a user either:

        a) blew away a directory, or
        b) used spack uninstall -f to get rid of it

    If, in either case, the package was removed but others still
    depend on it, we still need to track its spec, so we don't
    actually remove from the database until a spec has no installed
    dependents left.

    """
    def __init__(self, spec, path, installed, ref_count=0):
        self.spec = spec
        self.path = str(path)
        self.installed = bool(installed)
        self.ref_count = ref_count

    def to_dict(self):
        return { 'spec'      : self.spec.to_node_dict(),
                 'path'      : self.path,
                 'installed' : self.installed,
                 'ref_count' : self.ref_count }

    @classmethod
    def from_dict(cls, spec, dictionary):
        d = dictionary
        return InstallRecord(spec, d['path'], d['installed'], d['ref_count'])


class Database(object):
    def __init__(self, root, db_dir=None):
        """Create a Database for Spack installations under ``root``.

        A Database is a cache of Specs data from ``$prefix/spec.yaml``
        files in Spack installation directories.

        By default, Database files (data and lock files) are stored
        under ``root/.spack-db``, which is created if it does not
        exist. This is the ``db_dir``.

        The Database will attempt to read an ``index.yaml`` file in
        ``db_dir``. If it does not find one, it will be created when
        needed by scanning the entire Database root for ``spec.yaml``
        files according to Spack's ``DirectoryLayout``.

        Caller may optionally provide a custom ``db_dir`` parameter
        where data will be stored. This is intended to be used for
        testing the Database class.

        """
        self.root = root

        if db_dir is None:
            # If the db_dir is not provided, default to within the db root.
            self._db_dir = join_path(self.root, _db_dirname)
        else:
            # Allow customizing the database directory location for testing.
            self._db_dir = db_dir

        # Set up layout of database files within the db dir
        self._index_path = join_path(self._db_dir, 'index.yaml')
        self._lock_path = join_path(self._db_dir, 'lock')

        # Create needed directories and files
        if not os.path.exists(self._db_dir):
            mkdirp(self._db_dir)

        if not os.path.exists(self._lock_path):
            touch(self._lock_path)

        # initialize rest of state.
        self.lock = Lock(self._lock_path)
        self._data = {}


    def write_transaction(self, timeout=_db_lock_timeout):
        """Get a write lock context manager for use in a `with` block."""
        return WriteTransaction(self, self._read, self._write, timeout)


    def read_transaction(self, timeout=_db_lock_timeout):
        """Get a read lock context manager for use in a `with` block."""
        return ReadTransaction(self, self._read, None, timeout)
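A sketch of the intended usage, matching the `spack install` and `spack uninstall` changes above (db is a Database instance; spec names illustrative). Because the underlying Lock is recursive, transactions nest: only the outermost one takes the POSIX lock, re-reads the index on enter, and, for writes, saves it back on exit:

    with db.write_transaction():        # lock taken, then _read() syncs from disk
        if not db.query_one('libelf'):  # check-then-act under a single lock
            db.add('libelf@0.8.13',     # add() nests its own write_transaction
                   '/opt/spack/linux-x86_64/gcc-4.8/libelf-0.8.13')
    # on exit of the outermost block: _write() saves index.yaml, lock released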
    def _write_to_yaml(self, stream):
        """Write out the database to a YAML file.

        This function does not do any locking or transactions.
        """
        # map from per-spec hash code to installation record.
        installs = dict((k, v.to_dict()) for k, v in self._data.items())

        # database includes installation list and version.

        # NOTE: this DB version does not handle multiple installs of
        # the same spec well. If there are 2 identical specs with
        # different paths, it can't differentiate.
        # TODO: fix this before we support multiple install locations.
        database = {
            'database' : {
                'installs' : installs,
                'version' : str(_db_version)
            }
        }

        try:
            return yaml.dump(database, stream=stream, default_flow_style=False)
        except YAMLError as e:
            raise SpackYAMLError("error writing YAML database:", str(e))


    def _read_spec_from_yaml(self, hash_key, installs, parent_key=None):
        """Recursively construct a spec from a hash in a YAML database.

        Does not do any locking.
        """
        if hash_key not in installs:
            parent = read_spec(installs[parent_key]['path'])

        spec_dict = installs[hash_key]['spec']

        # Build spec from dict first.
        spec = Spec.from_node_dict(spec_dict)

        # Add dependencies from other records in the install DB to
        # form a full spec.
        for dep_hash in spec_dict[spec.name]['dependencies'].values():
            child = self._read_spec_from_yaml(dep_hash, installs, hash_key)
            spec._add_dependency(child)

        return spec


    def _read_from_yaml(self, stream):
        """
        Fill database from YAML, do not maintain old data
        Translate the spec portions from node-dict form to spec form

        Does not do any locking.
        """
        try:
            if isinstance(stream, basestring):
                with open(stream, 'r') as f:
                    yfile = yaml.load(f)
            else:
                yfile = yaml.load(stream)

        except MarkedYAMLError as e:
            raise SpackYAMLError("error parsing YAML database:", str(e))

        if yfile is None:
            return

        def check(cond, msg):
            if not cond: raise CorruptDatabaseError(self._index_path, msg)

        check('database' in yfile, "No 'database' attribute in YAML.")

        # High-level file checks
        db = yfile['database']
        check('installs' in db, "No 'installs' in YAML DB.")
        check('version' in db, "No 'version' in YAML DB.")

        # TODO: better version checking semantics.
        version = Version(db['version'])
        if version != _db_version:
            raise InvalidDatabaseVersionError(_db_version, version)

        # Iterate through database and check each record.
        installs = db['installs']
        data = {}
        for hash_key, rec in installs.items():
            try:
                # This constructs a spec DAG from the list of all installs
                spec = self._read_spec_from_yaml(hash_key, installs)

                # Validate the spec by ensuring the stored and actual
                # hashes are the same.
                spec_hash = spec.dag_hash()
                if not spec_hash == hash_key:
                    tty.warn("Hash mismatch in database: %s -> spec with hash %s"
                             % (hash_key, spec_hash))
                    continue    # TODO: is skipping the right thing to do?

                # Insert the brand new spec in the database. Each
                # spec has its own copies of its dependency specs.
                # TODO: would a more immutable spec implementation simplify this?
                data[hash_key] = InstallRecord.from_dict(spec, rec)

            except Exception as e:
                tty.warn("Invalid database record:",
                         "file: %s" % self._index_path,
                         "hash: %s" % hash_key,
                         "cause: %s" % str(e))
                raise

        self._data = data
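The resulting index.yaml mirrors this structure. A sketch of the mapping handed to yaml.dump, with illustrative hash, path, and an abbreviated node-dict (the real 'spec' value is whatever Spec.to_node_dict() emits):

    index = {
        'database': {
            'installs': {
                'abc123': {                       # key is spec.dag_hash()
                    'spec': {'libelf': {'version': '0.8.13',
                                        'dependencies': {}}},
                    'path': '/opt/spack/linux-x86_64/gcc-4.8/libelf-0.8.13',
                    'installed': True,
                    'ref_count': 1,
                },
            },
            'version': '0.9',
        }
    }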
    def reindex(self, directory_layout):
        """Build database index from scratch based on a directory layout.

        Locks the DB if it isn't locked already.

        """
        with self.write_transaction():
            old_data = self._data
            try:
                self._data = {}

                # Ask the directory layout to traverse the filesystem.
                for spec in directory_layout.all_specs():
                    # Create a spec for each known package and add it.
                    path = directory_layout.path_for_spec(spec)
                    self._add(spec, path, directory_layout)

                self._check_ref_counts()

            except:
                # If anything explodes, restore old data, skip write.
                self._data = old_data
                raise


    def _check_ref_counts(self):
        """Ensure consistency of reference counts in the DB.

        Raise an AssertionError if something is amiss.

        Does no locking.
        """
        counts = {}
        for key, rec in self._data.items():
            counts.setdefault(key, 0)
            for dep in rec.spec.dependencies.values():
                dep_key = dep.dag_hash()
                counts.setdefault(dep_key, 0)
                counts[dep_key] += 1

        for rec in self._data.values():
            key = rec.spec.dag_hash()
            expected = counts[key]
            found = rec.ref_count
            if not expected == found:
                raise AssertionError(
                    "Invalid ref_count: %s: %d (expected %d), in DB %s."
                    % (key, found, expected, self._index_path))


    def _write(self):
        """Write the in-memory database index to its file path.

        Does no locking.

        """
        temp_file = self._index_path + (
            '.%s.%s.temp' % (socket.getfqdn(), os.getpid()))

        # Write a temporary database file, then move it into place
        try:
            with open(temp_file, 'w') as f:
                self._write_to_yaml(f)
            os.rename(temp_file, self._index_path)
        except:
            # Clean up temp file if something goes wrong.
            if os.path.exists(temp_file):
                os.remove(temp_file)
            raise
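_write relies on the write-to-temp-then-rename pattern: os.rename() atomically replaces index.yaml on POSIX filesystems, so readers never observe a half-written index, and the temp name embeds hostname and pid so concurrent writers cannot collide on the temp file. The same pattern in miniature:

    import os, socket

    # Write to a uniquely named temp file, then atomically swap it into place.
    temp = 'index.yaml.%s.%d.temp' % (socket.getfqdn(), os.getpid())
    with open(temp, 'w') as f:
        f.write('data')
    os.rename(temp, 'index.yaml')   # readers see old or new, never partial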
    def _read(self):
        """Re-read Database from the data in the set location.

        This does no locking.
        """
        if os.path.isfile(self._index_path):
            # Read from YAML file if a database exists
            self._read_from_yaml(self._index_path)

        else:
            # The file doesn't exist, try to traverse the directory.
            # reindex() takes its own write lock, so no lock here.
            self.reindex(spack.install_layout)
    def _add(self, spec, path, directory_layout=None):
        """Add an install record for spec at path to the database.

        This assumes that the spec is not already installed. It
        updates the ref counts on dependencies of the spec in the DB.

        This operation is in-memory, and does not lock the DB.

        """
        key = spec.dag_hash()
        if key in self._data:
            rec = self._data[key]
            rec.installed = True

            # TODO: this overwrites a previous install path (when path !=
            # self._data[key].path), and the old path still has a
            # dependent in the DB. We could consider re-RPATH-ing the
            # dependents. This case is probably infrequent and may not be
            # worth fixing, but this is where we can discover it.
            rec.path = path

        else:
            self._data[key] = InstallRecord(spec, path, True)
            for dep in spec.dependencies.values():
                self._increment_ref_count(dep, directory_layout)


    def _increment_ref_count(self, spec, directory_layout=None):
        """Recursively examine dependencies and update their DB entries."""
        key = spec.dag_hash()
        if key not in self._data:
            installed = False
            path = None
            if directory_layout:
                path = directory_layout.path_for_spec(spec)
                installed = os.path.isdir(path)

            self._data[key] = InstallRecord(spec.copy(), path, installed)

            for dep in spec.dependencies.values():
                self._increment_ref_count(dep)

        self._data[key].ref_count += 1
    @_autospec
    def add(self, spec, path):
        """Add spec at path to database, locking and reading DB to sync.

        ``add()`` will lock and read from the DB on disk.

        """
        # TODO: ensure that spec is concrete?
        # Entire add is transactional.
        with self.write_transaction():
            self._add(spec, path)


    def _get_matching_spec_key(self, spec, **kwargs):
        """Get the exact spec OR get a single spec that matches."""
        key = spec.dag_hash()
        if not key in self._data:
            match = self.query_one(spec, **kwargs)
            if match:
                return match.dag_hash()
            raise KeyError("No such spec in database! %s" % spec)
        return key


    @_autospec
    def get_record(self, spec, **kwargs):
        key = self._get_matching_spec_key(spec, **kwargs)
        return self._data[key]


    def _decrement_ref_count(self, spec):
        key = spec.dag_hash()

        if not key in self._data:
            # TODO: print something here? DB is corrupt, but
            # not much we can do.
            return

        rec = self._data[key]
        rec.ref_count -= 1

        if rec.ref_count == 0 and not rec.installed:
            del self._data[key]
            for dep in spec.dependencies.values():
                self._decrement_ref_count(dep)


    def _remove(self, spec):
        """Non-locking version of remove(); does real work.
        """
        key = self._get_matching_spec_key(spec)
        rec = self._data[key]

        if rec.ref_count > 0:
            rec.installed = False
            return rec.spec

        del self._data[key]
        for dep in rec.spec.dependencies.values():
            self._decrement_ref_count(dep)

        # Returns the concrete spec so we know it in the case where a
        # query spec was passed in.
        return rec.spec


    @_autospec
    def remove(self, spec):
        """Removes a spec from the database. To be called on uninstall.

        Reads the database, then:

          1. Marks the spec as not installed.
          2. Removes the spec if it has no more dependents.
          3. If removed, recursively updates dependencies' ref counts
             and removes them if they are no longer needed.

        """
        # Take a lock around the entire removal.
        with self.write_transaction():
            return self._remove(spec)
@_autospec
|
||||
def installed_extensions_for(self, extendee_spec):
|
||||
"""
|
||||
Return the specs of all packages that extend
|
||||
the given spec
|
||||
"""
|
||||
for s in self.query():
|
||||
try:
|
||||
if s.package.extends(extendee_spec):
|
||||
yield s.package
|
||||
except UnknownPackageError as e:
|
||||
continue
|
||||
# skips unknown packages
|
||||
# TODO: conditional way to do this instead of catching exceptions
|
||||
|
||||
|
||||
def query(self, query_spec=any, known=any, installed=True):
|
||||
"""Run a query on the database.
|
||||
|
||||
``query_spec``
|
||||
Queries iterate through specs in the database and return
|
||||
those that satisfy the supplied ``query_spec``. If
|
||||
query_spec is `any`, This will match all specs in the
|
||||
database. If it is a spec, we'll evaluate
|
||||
``spec.satisfies(query_spec)``.
|
||||
|
||||
The query can be constrained by two additional attributes:
|
||||
|
||||
``known``
|
||||
Possible values: True, False, any
|
||||
|
||||
Specs that are "known" are those for which Spack can
|
||||
locate a ``package.py`` file -- i.e., Spack "knows" how to
|
||||
install them. Specs that are unknown may represent
|
||||
packages that existed in a previous version of Spack, but
|
||||
have since either changed their name or been removed.
|
||||
|
||||
``installed``
|
||||
Possible values: True, False, any
|
||||
|
||||
Specs for which a prefix exists are "installed". A spec
|
||||
that is NOT installed will be in the database if some
|
||||
other spec depends on it but its installation has gone
|
||||
away since Spack installed it.
|
||||
|
||||
TODO: Specs are a lot like queries. Should there be a
|
||||
wildcard spec object, and should specs have attributes
|
||||
like installed and known that can be queried? Or are
|
||||
these really special cases that only belong here?
|
||||
|
||||
"""
|
||||
with self.read_transaction():
|
||||
results = []
|
||||
for key, rec in self._data.items():
|
||||
if installed is not any and rec.installed != installed:
|
||||
continue
|
||||
if known is not any and spack.db.exists(rec.spec.name) != known:
|
||||
continue
|
||||
if query_spec is any or rec.spec.satisfies(query_spec):
|
||||
results.append(rec.spec)
|
||||
|
||||
return sorted(results)
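
The three knobs compose; a few illustrative calls (the spec strings are the mock packages used in the tests added below, not required setup):

    db = spack.installed_db
    db.query('mpileaks')                 # all installed mpileaks configurations
    db.query('mpileaks ^mpich')          # constrain by a dependency
    db.query('callpath', installed=any)  # include records kept only for ref counts
    db.query(known=False)                # specs whose package.py has disappeared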


    def query_one(self, query_spec, known=any, installed=True):
        """Query for exactly one spec that matches the query spec.

        Raises an assertion error if more than one spec matches the
        query.  Returns None if no installed package matches.

        """
        concrete_specs = self.query(query_spec, known, installed)
        assert len(concrete_specs) <= 1
        return concrete_specs[0] if concrete_specs else None


    def missing(self, spec):
        with self.read_transaction():
            key = spec.dag_hash()
            return key in self._data and not self._data[key].installed


class _Transaction(object):
    """Simple nested transaction context manager that uses a file lock.

    This class can trigger actions when the lock is acquired for the
    first time and released for the last.

    The timeout for the lock is customizable.
    """
    def __init__(self, db, acquire_fn=None, release_fn=None,
                 timeout=_db_lock_timeout):
        self._db = db
        self._timeout = timeout
        self._acquire_fn = acquire_fn
        self._release_fn = release_fn

    def __enter__(self):
        if self._enter() and self._acquire_fn:
            self._acquire_fn()

    def __exit__(self, type, value, traceback):
        if self._exit() and self._release_fn:
            self._release_fn()


class ReadTransaction(_Transaction):
    def _enter(self):
        return self._db.lock.acquire_read(self._timeout)

    def _exit(self):
        return self._db.lock.release_read()


class WriteTransaction(_Transaction):
    def _enter(self):
        return self._db.lock.acquire_write(self._timeout)

    def _exit(self):
        return self._db.lock.release_write()
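
For reference, a sketch of how the Database class can hand these out as the `read_transaction()`/`write_transaction()` factories used above -- the `_read`/`_write` hook names are assumptions for illustration:

    class Database(object):
        # ...
        def read_transaction(self, timeout=_db_lock_timeout):
            """Context manager: shared lock, re-reading the index on first entry."""
            return ReadTransaction(self, acquire_fn=self._read, timeout=timeout)

        def write_transaction(self, timeout=_db_lock_timeout):
            """Context manager: exclusive lock, writing the index back on last exit."""
            return WriteTransaction(self, acquire_fn=self._read,
                                    release_fn=self._write, timeout=timeout)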


class CorruptDatabaseError(SpackError):
    def __init__(self, path, msg=''):
        super(CorruptDatabaseError, self).__init__(
            "Spack database is corrupt: %s.  %s" % (path, msg))


class InvalidDatabaseVersionError(SpackError):
    def __init__(self, expected, found):
        super(InvalidDatabaseVersionError, self).__init__(
            "Expected database version %s but found version %s"
            % (expected, found))

@@ -238,13 +238,10 @@ def patch(pkg, url_or_filename, level=1, when=None):
    if when is None:
        when = pkg.name
    when_spec = parse_anonymous_spec(when, pkg.name)

    if when_spec not in pkg.patches:
        pkg.patches[when_spec] = [Patch(pkg, pkg.name, url_or_filename, level)]
    else:
        # if this spec is identical to some other, then append this
        # patch to the existing list.
        pkg.patches[when_spec].append(Patch(pkg, pkg.name, url_or_filename, level))
    cur_patches = pkg.patches.setdefault(when_spec, [])
    # if this spec is identical to some other, then append this
    # patch to the existing list.
    cur_patches.append(Patch(pkg, url_or_filename, level))


@directive('variants')

@@ -32,7 +32,6 @@
from external import yaml

import llnl.util.tty as tty
from llnl.util.lang import memoized
from llnl.util.filesystem import join_path, mkdirp

from spack.spec import Spec

@@ -187,14 +186,9 @@ def hidden_file_paths(self):

    def relative_path_for_spec(self, spec):
        _check_concrete(spec)
        enabled_variants = (
            '-' + v.name for v in spec.variants.values()
            if v.enabled)

        dir_name = "%s-%s%s-%s" % (
        dir_name = "%s-%s-%s" % (
            spec.name,
            spec.version,
            ''.join(enabled_variants),
            spec.dag_hash(self.hash_len))

        path = join_path(
@@ -263,7 +257,6 @@ def create_install_directory(self, spec):
        self.write_spec(spec, spec_file_path)


    @memoized
    def all_specs(self):
        if not os.path.isdir(self.root):
            return []

@@ -274,7 +267,6 @@ def all_specs(self):
        return [self.read_spec(s) for s in spec_files]


    @memoized
    def specs_by_hash(self):
        by_hash = {}
        for spec in self.all_specs():
@@ -55,8 +55,8 @@ def die(self):

    def __str__(self):
        msg = self.message
        if self.long_message:
            msg += "\n    %s" % self.long_message
        if self._long_message:
            msg += "\n    %s" % self._long_message
        return msg

class UnsupportedPlatformError(SpackError):
@@ -438,9 +438,16 @@ def stage(self):
            raise ValueError("Can only get a stage for a concrete package.")

        if self._stage is None:
            # Construct a mirror path (TODO: get this out of package.py)
            mp = spack.mirror.mirror_archive_path(self.spec)
            self._stage = Stage(
                self.fetcher, mirror_path=mp, name=self.spec.short_spec)

            # Construct a path where the stage should build..
            s = self.spec
            stage_name = "%s-%s-%s" % (s.name, s.version, s.dag_hash())

            # Build the stage
            self._stage = Stage(self.fetcher, mirror_path=mp, name=stage_name)

        return self._stage
@@ -563,9 +570,12 @@ def installed(self):
    @property
    def installed_dependents(self):
        """Return a list of the specs of all installed packages that depend
           on this one."""
           on this one.

        TODO: move this method to database.py?
        """
        dependents = []
        for spec in spack.db.installed_package_specs():
        for spec in spack.installed_db.query():
            if self.name == spec.name:
                continue
            for dep in spec.traverse():
@@ -785,6 +795,7 @@ def cleanup():
                        "Manually remove this directory to fix:",
                        self.prefix)


        def real_work():
            try:
                tty.msg("Building %s." % self.name)

@@ -844,6 +855,10 @@ def real_work():
        # Do the build.
        spack.build_environment.fork(self, real_work)

        # note: PARENT of the build process adds the new package to
        # the database, so that we don't need to re-read from file.
        spack.installed_db.add(self.spec, self.prefix)

        # Once everything else is done, run post install hooks
        spack.hooks.post_install(self)
@@ -862,6 +877,14 @@ def do_install_dependencies(self, **kwargs):
            dep.package.do_install(**kwargs)


    @property
    def build_log_path(self):
        if self.installed:
            return spack.install_layout.build_log_path(self.spec)
        else:
            return join_path(self.stage.source_path, 'spack-build.out')


    @property
    def module(self):
        """Use this to add variables to the class's module's scope.

@@ -916,6 +939,7 @@ def do_uninstall(self, force=False):

        # Uninstalling in Spack only requires removing the prefix.
        self.remove_prefix()
        spack.installed_db.remove(self.spec)
        tty.msg("Successfully uninstalled %s." % self.spec.short_spec)

        # Once everything else is done, run post install hooks
@@ -363,6 +363,11 @@ def get(self, spec, new=False):
        return self._instances[spec]


    def purge(self):
        """Clear entire package instance cache."""
        self._instances.clear()


    @_autospec
    def providers_for(self, vpkg_spec):
        if self._provider_index is None:

@@ -396,6 +401,7 @@ def filename_for_package_name(self, pkg_name):
        """
        validate_module_name(pkg_name)
        pkg_dir = self.dirname_for_package_name(pkg_name)

        return join_path(pkg_dir, package_file_name)
@@ -41,12 +41,8 @@ class Patch(object):
    """This class describes a patch to be applied to some expanded
       source code."""

    def __init__(self, pkg, pkg_name, path_or_url, level):
        print pkg, pkg.name, type(pkg)
        print "pkg:", dir(pkg.__module__)
        print "NAMESPACE", pkg.namespace()

        self.pkg_name = pkg_name
    def __init__(self, pkg, path_or_url, level):
        self.pkg_name = pkg.name
        self.path_or_url = path_or_url
        self.path = None
        self.url = None
@@ -641,7 +641,9 @@ def prefix(self):


    def dag_hash(self, length=None):
        """Return a hash of the entire spec DAG, including connectivity."""
        """
        Return a hash of the entire spec DAG, including connectivity.
        """
        yaml_text = yaml.dump(
            self.to_node_dict(), default_flow_style=True, width=sys.maxint)
        sha = hashlib.sha1(yaml_text)

@@ -711,7 +713,7 @@ def from_yaml(stream):
        try:
            yfile = yaml.load(stream)
        except MarkedYAMLError, e:
            raise SpackYAMLError("error parsing YMAL spec:", str(e))
            raise SpackYAMLError("error parsing YAML spec:", str(e))

        for node in yfile['spec']:
            name = next(iter(node))

@@ -2012,4 +2014,4 @@ def __init__(self, provided, required):

class SpackYAMLError(spack.error.SpackError):
    def __init__(self, msg, yaml_error):
        super(SpackError, self).__init__(msg, str(yaml_error))
        super(SpackYAMLError, self).__init__(msg, str(yaml_error))
@@ -261,7 +261,8 @@ def fetch(self):
                tty.debug(e)
                continue
        else:
            tty.die("All fetchers failed for %s" % self.name)
            errMessage = "All fetchers failed for %s" % self.name
            raise fs.FetchError(errMessage, None)


    def check(self):
@@ -56,7 +56,10 @@
              'spec_yaml',
              'optional_deps',
              'make_executable',
              'configure_guess']
              'configure_guess',
              'unit_install',
              'lock',
              'database']


def list_tests():

@@ -76,7 +79,7 @@ def run(names, verbose=False):
        if test not in test_names:
            tty.error("%s is not a valid spack test name." % test,
                      "Valid names are:")
            colify(test_names, indent=4)
            colify(sorted(test_names), indent=4)
            sys.exit(1)

    runner = unittest.TextTestRunner(verbosity=verbosity)
352
lib/spack/spack/test/database.py
Normal file
@@ -0,0 +1,352 @@
##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
These tests check that the database is functioning properly,
both in memory and in its file.
"""
import tempfile
import shutil
import multiprocessing

from llnl.util.lock import *
from llnl.util.filesystem import join_path

import spack
from spack.database import Database
from spack.directory_layout import YamlDirectoryLayout
from spack.test.mock_packages_test import *

from llnl.util.tty.colify import colify

def _print_ref_counts():
    """Print out all ref counts for the graph used here, for debugging"""
    recs = []

    def add_rec(spec):
        cspecs = spack.installed_db.query(spec, installed=any)

        if not cspecs:
            recs.append("[ %-7s ] %-20s-" % ('', spec))
        else:
            key = cspecs[0].dag_hash()
            rec = spack.installed_db.get_record(cspecs[0])
            recs.append("[ %-7s ] %-20s%d" % (key[:7], spec, rec.ref_count))

    with spack.installed_db.read_transaction():
        add_rec('mpileaks ^mpich')
        add_rec('callpath ^mpich')
        add_rec('mpich')

        add_rec('mpileaks ^mpich2')
        add_rec('callpath ^mpich2')
        add_rec('mpich2')

        add_rec('mpileaks ^zmpi')
        add_rec('callpath ^zmpi')
        add_rec('zmpi')
        add_rec('fake')

        add_rec('dyninst')
        add_rec('libdwarf')
        add_rec('libelf')

    colify(recs, cols=3)


class DatabaseTest(MockPackagesTest):

    def _mock_install(self, spec):
        s = Spec(spec)
        pkg = spack.db.get(s.concretized())
        pkg.do_install(fake=True)


    def _mock_remove(self, spec):
        specs = spack.installed_db.query(spec)
        assert len(specs) == 1
        spec = specs[0]
        spec.package.do_uninstall(spec)


    def setUp(self):
        super(DatabaseTest, self).setUp()
        #
        # TODO: make the mockup below easier.
        #

        # Make a fake install directory
        self.install_path = tempfile.mkdtemp()
        self.spack_install_path = spack.install_path
        spack.install_path = self.install_path

        self.install_layout = YamlDirectoryLayout(self.install_path)
        self.spack_install_layout = spack.install_layout
        spack.install_layout = self.install_layout

        # Make fake database and fake install directory.
        self.installed_db = Database(self.install_path)
        self.spack_installed_db = spack.installed_db
        spack.installed_db = self.installed_db

        # Make a mock database with some packages installed; note that
        # the ref count for dyninst here will be 3, as it's recycled
        # across each install.
        #
        # Here is what the mock DB looks like:
        #
        # o  mpileaks     o  mpileaks'    o  mpileaks''
        # |\              |\              |\
        # | o  callpath   | o  callpath'  | o  callpath''
        # |/|             |/|             |/|
        # o |  mpich      o |  mpich2     o |  zmpi
        #   |               |             o |  fake
        #   |               |               |
        #   |               |______________/
        #   | .____________/
        #   |/
        #   o  dyninst
        #   |\
        #   | o  libdwarf
        #   |/
        #   o  libelf
        #

        # Transaction used to avoid repeated writes.
        with spack.installed_db.write_transaction():
            self._mock_install('mpileaks ^mpich')
            self._mock_install('mpileaks ^mpich2')
            self._mock_install('mpileaks ^zmpi')


    def tearDown(self):
        super(DatabaseTest, self).tearDown()
        shutil.rmtree(self.install_path)
        spack.install_path = self.spack_install_path
        spack.install_layout = self.spack_install_layout
        spack.installed_db = self.spack_installed_db


    def test_005_db_exists(self):
        """Make sure db cache file exists after creating."""
        index_file = join_path(self.install_path, '.spack-db', 'index.yaml')
        lock_file = join_path(self.install_path, '.spack-db', 'lock')

        self.assertTrue(os.path.exists(index_file))
        self.assertTrue(os.path.exists(lock_file))


    def test_010_all_install_sanity(self):
        """Ensure that the install layout reflects what we think it does."""
        all_specs = spack.install_layout.all_specs()
        self.assertEqual(len(all_specs), 13)

        # query specs with multiple configurations
        mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
        callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
        mpi_specs = [s for s in all_specs if s.satisfies('mpi')]

        self.assertEqual(len(mpileaks_specs), 3)
        self.assertEqual(len(callpath_specs), 3)
        self.assertEqual(len(mpi_specs), 3)

        # query specs with single configurations
        dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
        libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
        libelf_specs = [s for s in all_specs if s.satisfies('libelf')]

        self.assertEqual(len(dyninst_specs), 1)
        self.assertEqual(len(libdwarf_specs), 1)
        self.assertEqual(len(libelf_specs), 1)

        # Query by dependency
        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
        self.assertEqual(len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)


    def test_015_write_and_read(self):
        # write and read DB
        with spack.installed_db.write_transaction():
            specs = spack.installed_db.query()
            recs = [spack.installed_db.get_record(s) for s in specs]

        for spec, rec in zip(specs, recs):
            new_rec = spack.installed_db.get_record(spec)
            self.assertEqual(new_rec.ref_count, rec.ref_count)
            self.assertEqual(new_rec.spec, rec.spec)
            self.assertEqual(new_rec.path, rec.path)
            self.assertEqual(new_rec.installed, rec.installed)


    def _check_db_sanity(self):
        """Utility function to check db against install layout."""
        expected = sorted(spack.install_layout.all_specs())
        actual = sorted(self.installed_db.query())

        self.assertEqual(len(expected), len(actual))
        for e, a in zip(expected, actual):
            self.assertEqual(e, a)


    def test_020_db_sanity(self):
        """Make sure query() returns what's actually in the db."""
        self._check_db_sanity()


    def test_030_db_sanity_from_another_process(self):
        def read_and_modify():
            self._check_db_sanity()  # check that other process can read DB
            with self.installed_db.write_transaction():
                self._mock_remove('mpileaks ^zmpi')

        p = multiprocessing.Process(target=read_and_modify, args=())
        p.start()
        p.join()

        # ensure child process change is visible in parent process
        with self.installed_db.read_transaction():
            self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 0)


    def test_040_ref_counts(self):
        """Ensure that we got ref counts right when we read the DB."""
        self.installed_db._check_ref_counts()


    def test_050_basic_query(self):
        """Ensure that querying the database is consistent with what is installed."""
        # query everything
        self.assertEqual(len(spack.installed_db.query()), 13)

        # query specs with multiple configurations
        mpileaks_specs = self.installed_db.query('mpileaks')
        callpath_specs = self.installed_db.query('callpath')
        mpi_specs = self.installed_db.query('mpi')

        self.assertEqual(len(mpileaks_specs), 3)
        self.assertEqual(len(callpath_specs), 3)
        self.assertEqual(len(mpi_specs), 3)

        # query specs with single configurations
        dyninst_specs = self.installed_db.query('dyninst')
        libdwarf_specs = self.installed_db.query('libdwarf')
        libelf_specs = self.installed_db.query('libelf')

        self.assertEqual(len(dyninst_specs), 1)
        self.assertEqual(len(libdwarf_specs), 1)
        self.assertEqual(len(libelf_specs), 1)

        # Query by dependency
        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich')), 1)
        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich2')), 1)
        self.assertEqual(len(self.installed_db.query('mpileaks ^zmpi')), 1)


    def _check_remove_and_add_package(self, spec):
        """Remove a spec from the DB, then add it and make sure everything's
           still ok once it is added.  This checks that it was
           removed, that it's back when added again, and that ref
           counts are consistent.
        """
        original = self.installed_db.query()
        self.installed_db._check_ref_counts()

        # Remove spec
        concrete_spec = self.installed_db.remove(spec)
        self.installed_db._check_ref_counts()
        remaining = self.installed_db.query()

        # ensure spec we removed is gone
        self.assertEqual(len(original) - 1, len(remaining))
        self.assertTrue(all(s in original for s in remaining))
        self.assertTrue(concrete_spec not in remaining)

        # add it back and make sure everything is ok.
        self.installed_db.add(concrete_spec, "")
        installed = self.installed_db.query()
        self.assertEqual(len(installed), len(original))

        # sanity check against directory layout and check ref counts.
        self._check_db_sanity()
        self.installed_db._check_ref_counts()


    def test_060_remove_and_add_root_package(self):
        self._check_remove_and_add_package('mpileaks ^mpich')


    def test_070_remove_and_add_dependency_package(self):
        self._check_remove_and_add_package('dyninst')


    def test_080_root_ref_counts(self):
        rec = self.installed_db.get_record('mpileaks ^mpich')

        # Remove a top-level spec from the DB
        self.installed_db.remove('mpileaks ^mpich')

        # record no longer in DB
        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])

        # record's deps have updated ref_counts
        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 0)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 1)

        # put the spec back
        self.installed_db.add(rec.spec, rec.path)

        # record is present again
        self.assertEqual(len(self.installed_db.query('mpileaks ^mpich', installed=any)), 1)

        # dependencies have ref counts updated
        self.assertEqual(self.installed_db.get_record('callpath ^mpich').ref_count, 1)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)


    def test_090_non_root_ref_counts(self):
        mpileaks_mpich_rec = self.installed_db.get_record('mpileaks ^mpich')
        callpath_mpich_rec = self.installed_db.get_record('callpath ^mpich')

        # "force remove" a non-root spec from the DB
        self.installed_db.remove('callpath ^mpich')

        # record still in DB but marked uninstalled
        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=True), [])
        self.assertEqual(len(self.installed_db.query('callpath ^mpich', installed=any)), 1)

        # record and its deps have same ref_counts
        self.assertEqual(self.installed_db.get_record('callpath ^mpich', installed=any).ref_count, 1)
        self.assertEqual(self.installed_db.get_record('mpich').ref_count, 2)

        # remove only dependent of uninstalled callpath record
        self.installed_db.remove('mpileaks ^mpich')

        # record and parent are completely gone.
        self.assertEqual(self.installed_db.query('mpileaks ^mpich', installed=any), [])
        self.assertEqual(self.installed_db.query('callpath ^mpich', installed=any), [])

        # mpich ref count updated properly.
        mpich_rec = self.installed_db.get_record('mpich')
        self.assertEqual(mpich_rec.ref_count, 0)
266
lib/spack/spack/test/lock.py
Normal file
@@ -0,0 +1,266 @@
##############################################################################
# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
These tests ensure that our lock works correctly.
"""
import unittest
import os
import tempfile
import shutil
from multiprocessing import Process

from llnl.util.lock import *
from llnl.util.filesystem import join_path, touch

from spack.util.multiproc import Barrier

# This is the longest a failed test will take, as the barriers will
# time out and raise an exception.
barrier_timeout = 5


class LockTest(unittest.TestCase):

    def setUp(self):
        self.tempdir = tempfile.mkdtemp()
        self.lock_path = join_path(self.tempdir, 'lockfile')
        touch(self.lock_path)


    def tearDown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)


    def multiproc_test(self, *functions):
        """Order some processes using simple barrier synchronization."""
        b = Barrier(len(functions), timeout=barrier_timeout)
        procs = [Process(target=f, args=(b,)) for f in functions]
        for p in procs:
            p.start()
        for p in procs:
            p.join()
            self.assertEqual(p.exitcode, 0)


    #
    # Process snippets below can be composed into tests.
    #
    def acquire_write(self, barrier):
        lock = Lock(self.lock_path)
        lock.acquire_write()  # grab exclusive lock
        barrier.wait()
        barrier.wait()  # hold the lock until exception raises in other procs.

    def acquire_read(self, barrier):
        lock = Lock(self.lock_path)
        lock.acquire_read()  # grab shared lock
        barrier.wait()
        barrier.wait()  # hold the lock until exception raises in other procs.

    def timeout_write(self, barrier):
        lock = Lock(self.lock_path)
        barrier.wait()  # wait for lock acquire in first process
        self.assertRaises(LockError, lock.acquire_write, 0.1)
        barrier.wait()

    def timeout_read(self, barrier):
        lock = Lock(self.lock_path)
        barrier.wait()  # wait for lock acquire in first process
        self.assertRaises(LockError, lock.acquire_read, 0.1)
        barrier.wait()


    #
    # Test that exclusive locks on other processes time out when an
    # exclusive lock is held.
    #
    def test_write_lock_timeout_on_write(self):
        self.multiproc_test(self.acquire_write, self.timeout_write)

    def test_write_lock_timeout_on_write_2(self):
        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write)

    def test_write_lock_timeout_on_write_3(self):
        self.multiproc_test(self.acquire_write, self.timeout_write, self.timeout_write, self.timeout_write)


    #
    # Test that shared locks on other processes time out when an
    # exclusive lock is held.
    #
    def test_read_lock_timeout_on_write(self):
        self.multiproc_test(self.acquire_write, self.timeout_read)

    def test_read_lock_timeout_on_write_2(self):
        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read)

    def test_read_lock_timeout_on_write_3(self):
        self.multiproc_test(self.acquire_write, self.timeout_read, self.timeout_read, self.timeout_read)


    #
    # Test that exclusive locks time out when shared locks are held.
    #
    def test_write_lock_timeout_on_read(self):
        self.multiproc_test(self.acquire_read, self.timeout_write)

    def test_write_lock_timeout_on_read_2(self):
        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write)

    def test_write_lock_timeout_on_read_3(self):
        self.multiproc_test(self.acquire_read, self.timeout_write, self.timeout_write, self.timeout_write)


    #
    # Test that exclusive locks time out while lots of shared locks are held.
    #
    def test_write_lock_timeout_with_multiple_readers_2_1(self):
        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_2_2(self):
        self.multiproc_test(self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_3_1(self):
        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write)

    def test_write_lock_timeout_with_multiple_readers_3_2(self):
        self.multiproc_test(self.acquire_read, self.acquire_read, self.acquire_read, self.timeout_write, self.timeout_write)


    #
    # Longer test case that ensures locks are reusable.  Ordering is
    # enforced by barriers throughout -- steps are shown with numbers.
    #
    def test_complex_acquire_and_release_chain(self):
        def p1(barrier):
            lock = Lock(self.lock_path)

            lock.acquire_write()
            barrier.wait()  # ---------------------------------------- 1
            # others test timeout
            barrier.wait()  # ---------------------------------------- 2
            lock.release_write()  # release and others acquire read
            barrier.wait()  # ---------------------------------------- 3
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 4
            lock.release_read()
            barrier.wait()  # ---------------------------------------- 5

            # p2 upgrades read to write
            barrier.wait()  # ---------------------------------------- 6
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 7
            # p2 releases write and read
            barrier.wait()  # ---------------------------------------- 8

            # p3 acquires read
            barrier.wait()  # ---------------------------------------- 9
            # p3 upgrades read to write
            barrier.wait()  # ---------------------------------------- 10
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 11
            # p3 releases locks
            barrier.wait()  # ---------------------------------------- 12
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 13
            lock.release_read()


        def p2(barrier):
            lock = Lock(self.lock_path)

            # p1 acquires write
            barrier.wait()  # ---------------------------------------- 1
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 2
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 3
            # p1 tests shared read
            barrier.wait()  # ---------------------------------------- 4
            # others release reads
            barrier.wait()  # ---------------------------------------- 5

            lock.acquire_write()  # upgrade read to write
            barrier.wait()  # ---------------------------------------- 6
            # others test timeout
            barrier.wait()  # ---------------------------------------- 7
            lock.release_write()  # release read AND write (need both)
            lock.release_read()
            barrier.wait()  # ---------------------------------------- 8

            # p3 acquires read
            barrier.wait()  # ---------------------------------------- 9
            # p3 upgrades read to write
            barrier.wait()  # ---------------------------------------- 10
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 11
            # p3 releases locks
            barrier.wait()  # ---------------------------------------- 12
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 13
            lock.release_read()


        def p3(barrier):
            lock = Lock(self.lock_path)

            # p1 acquires write
            barrier.wait()  # ---------------------------------------- 1
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 2
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 3
            # p1 tests shared read
            barrier.wait()  # ---------------------------------------- 4
            lock.release_read()
            barrier.wait()  # ---------------------------------------- 5

            # p2 upgrades read to write
            barrier.wait()  # ---------------------------------------- 6
            self.assertRaises(LockError, lock.acquire_write, 0.1)
            self.assertRaises(LockError, lock.acquire_read, 0.1)
            barrier.wait()  # ---------------------------------------- 7
            # p2 releases write & read
            barrier.wait()  # ---------------------------------------- 8

            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 9
            lock.acquire_write()
            barrier.wait()  # ---------------------------------------- 10
            # others test timeout
            barrier.wait()  # ---------------------------------------- 11
            lock.release_read()   # release read AND write in opposite
            lock.release_write()  # order from before on p2
            barrier.wait()  # ---------------------------------------- 12
            lock.acquire_read()
            barrier.wait()  # ---------------------------------------- 13
            lock.release_read()

        self.multiproc_test(p1, p2, p3)
121
lib/spack/spack/test/unit_install.py
Normal file
@@ -0,0 +1,121 @@
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import unittest
import itertools

import spack
test_install = __import__("spack.cmd.test-install",
                          fromlist=["BuildId", "create_test_output", "TestResult"])

class MockOutput(object):
    def __init__(self):
        self.results = {}

    def add_test(self, buildId, passed=True, buildInfo=None):
        self.results[buildId] = passed

    def write_to(self, stream):
        pass

class MockSpec(object):
    def __init__(self, name, version, hashStr=None):
        self.dependencies = {}
        self.name = name
        self.version = version
        self.hash = hashStr if hashStr else hash((name, version))

    def traverse(self, order=None):
        allDeps = itertools.chain.from_iterable(i.traverse() for i in
                                                self.dependencies.itervalues())
        return set(itertools.chain([self], allDeps))

    def dag_hash(self):
        return self.hash

    def to_yaml(self):
        return "<<<MOCK YAML {0}>>>".format(test_install.BuildId(self).stringId())

class MockPackage(object):
    def __init__(self, buildLogPath):
        self.installed = False
        self.build_log_path = buildLogPath

specX = MockSpec("X", "1.2.0")
specY = MockSpec("Y", "2.3.8")
specX.dependencies['Y'] = specY
pkgX = MockPackage('logX')
pkgY = MockPackage('logY')
bIdX = test_install.BuildId(specX)
bIdY = test_install.BuildId(specY)

class UnitInstallTest(unittest.TestCase):
    """Tests test-install where X->Y"""

    def setUp(self):
        super(UnitInstallTest, self).setUp()

        pkgX.installed = False
        pkgY.installed = False

        pkgDb = MockPackageDb({specX: pkgX, specY: pkgY})
        spack.db = pkgDb

    def tearDown(self):
        super(UnitInstallTest, self).tearDown()

    def test_installing_both(self):
        mo = MockOutput()

        pkgX.installed = True
        pkgY.installed = True
        test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=test_fetch_log)

        self.assertEqual(mo.results,
                         {bIdX: test_install.TestResult.PASSED,
                          bIdY: test_install.TestResult.PASSED})

    def test_dependency_already_installed(self):
        mo = MockOutput()

        pkgX.installed = True
        pkgY.installed = True
        test_install.create_test_output(specX, [specX], mo, getLogFunc=test_fetch_log)

        self.assertEqual(mo.results, {bIdX: test_install.TestResult.PASSED})

    #TODO: add test(s) where Y fails to install

class MockPackageDb(object):
    def __init__(self, init=None):
        self.specToPkg = {}
        if init:
            self.specToPkg.update(init)

    def get(self, spec):
        return self.specToPkg[spec]

def test_fetch_log(path):
    return []

@@ -209,8 +209,8 @@ def parse_version_offset(path):
        # e.g. foobar-4.5.1
        (r'-((\d+\.)*\d+)$', stem),

        # e.g. foobar-4.5.1b
        (r'-((\d+\.)*\d+\-?([a-z]|rc|RC|tp|TP)\d*)$', stem),
        # e.g. foobar-4.5.1b, foobar4.5RC, foobar.v4.5.1b
        (r'[-._]?v?((\d+\.)*\d+[-._]?([a-z]|rc|RC|tp|TP?)\d*)$', stem),

        # e.g. foobar-4.5.0-beta1, or foobar-4.50-beta
        (r'-((\d+\.)*\d+-beta(\d+)?)$', stem),
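
A quick check of what the widened second pattern now accepts (standalone sketch, not part of the change):

    import re

    pattern = r'[-._]?v?((\d+\.)*\d+[-._]?([a-z]|rc|RC|tp|TP?)\d*)$'
    for stem in ('foobar-4.5.1b', 'foobar4.5RC', 'foobar.v4.5.1b'):
        m = re.search(pattern, stem)
        print("%s -> %s" % (stem, m.group(1)))  # 4.5.1b, 4.5RC, 4.5.1b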


@@ -27,9 +27,11 @@
than multiprocessing.Pool.apply() can.  For example, apply() will fail
to pickle functions if they're passed indirectly as parameters.
"""
from multiprocessing import Process, Pipe
from multiprocessing import Process, Pipe, Semaphore, Value
from itertools import izip

__all__ = ['spawn', 'parmap', 'Barrier']

def spawn(f):
    def fun(pipe, x):
        pipe.send(f(x))

@@ -43,3 +45,49 @@ def parmap(f,X):
    [p.join() for p in proc]
    return [p.recv() for (p, c) in pipe]


class Barrier:
    """Simple reusable semaphore barrier.

    Python 2.6 doesn't have multiprocessing barriers so we implement this.

    See http://greenteapress.com/semaphores/downey08semaphores.pdf, p. 41.
    """
    def __init__(self, n, timeout=None):
        self.n = n
        self.to = timeout
        self.count = Value('i', 0)
        self.mutex = Semaphore(1)
        self.turnstile1 = Semaphore(0)
        self.turnstile2 = Semaphore(1)


    def wait(self):
        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value += 1
        if self.count.value == self.n:
            if not self.turnstile2.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile1.release()
        self.mutex.release()

        if not self.turnstile1.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile1.release()

        if not self.mutex.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.count.value -= 1
        if self.count.value == 0:
            if not self.turnstile1.acquire(timeout=self.to):
                raise BarrierTimeoutError()
            self.turnstile2.release()
        self.mutex.release()

        if not self.turnstile2.acquire(timeout=self.to):
            raise BarrierTimeoutError()
        self.turnstile2.release()


class BarrierTimeoutError: pass
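
A minimal sketch of driving the barrier from calling code (the worker body is hypothetical; the lock tests above use exactly this pattern):

    from multiprocessing import Process

    def worker(barrier):
        # per-process setup ...
        barrier.wait()   # no process proceeds until all n have arrived
        # section that must start together in every process ...

    b = Barrier(3, timeout=5)
    procs = [Process(target=worker, args=(b,)) for _ in range(3)]
    for p in procs: p.start()
    for p in procs: p.join()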


@@ -73,10 +73,8 @@ def update(self, spec):
        for provided_spec, provider_spec in pkg.provided.iteritems():
            if provider_spec.satisfies(spec, deps=False):
                provided_name = provided_spec.name
                if provided_name not in self.providers:
                    self.providers[provided_name] = {}

                provider_map = self.providers[provided_name]
                provider_map = self.providers.setdefault(provided_name, {})
                if not provided_spec in provider_map:
                    provider_map[provided_spec] = set()

@@ -133,9 +131,7 @@ def _cross_provider_maps(self, lmap, rmap):
                if lp_spec.name == rp_spec.name:
                    try:
                        const = lp_spec.copy().constrain(rp_spec, deps=False)
                        if constrained not in result:
                            result[constrained] = set()
                        result[constrained].add(const)
                        result.setdefault(constrained, set()).add(const)
                    except spack.spec.UnsatisfiableSpecError:
                        continue
        return result
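
Both hunks replace the check-then-create dance with `dict.setdefault`, which returns the existing value or installs and returns the default in one step; a tiny standalone illustration:

    providers = {}

    # before: a membership test, an assignment, and a second lookup
    if 'mpi' not in providers:
        providers['mpi'] = set()
    providers['mpi'].add('mpich')

    # after: one expression, same result
    providers.setdefault('mpi', set()).add('openmpi')

    print(providers)  # {'mpi': set(['mpich', 'openmpi'])} on Python 2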


BIN
share/spack/logo/favicon.ico
Executable file
Binary file not shown. (new image, 1.1 KiB)

BIN
share/spack/logo/spack-logo-text-64.png
Normal file
Binary file not shown. (new image, 18 KiB)

BIN
share/spack/logo/spack-logo-white-text-48.png
Normal file
Binary file not shown. (new image, 12 KiB)
15
var/spack/packages/activeharmony/package.py
Normal file
@@ -0,0 +1,15 @@
from spack import *

class Activeharmony(Package):
    """Active Harmony: a framework for auto-tuning (the automated search
       for values to improve the performance of a target application)."""
    homepage = "http://www.dyninst.org/harmony"
    url = "http://www.dyninst.org/sites/default/files/downloads/harmony/ah-4.5.tar.gz"

    version('4.5', 'caee5b864716d376e2c25d739251b2a9')

    def install(self, spec, prefix):
        make("CFLAGS=-O3")
        make("install", 'PREFIX=%s' % prefix)
34
var/spack/packages/apex/package.py
Normal file
@@ -0,0 +1,34 @@
from spack import *
from spack.util.environment import *

class Apex(Package):
    homepage = "http://github.com/khuck/xpress-apex"
    #url = "http://github.com/khuck/xpress-apex/archive/v0.1-release-candidate.tar.gz"
    url = "http://github.com/khuck/xpress-apex"

    #version('0.1', '6e039c224387348296739f6bf360d081')
    #version('master', branch='master', git='https://github.com/khuck/xpress-apex.git')
    version('2015-10-21', git='https://github.com/khuck/xpress-apex.git', commit='d2e66ddde689120472fc57fc546d8cd80aab745c')

    depends_on("binutils+libiberty")
    depends_on("boost@1.54:")
    depends_on("cmake@2.8.12:")
    depends_on("activeharmony@4.5:")
    depends_on("ompt-openmp")

    def install(self, spec, prefix):

        path = get_path("PATH")
        path.remove(spec["binutils"].prefix.bin)
        path_set("PATH", path)
        with working_dir("build", create=True):
            cmake('-DBOOST_ROOT=%s' % spec['boost'].prefix,
                  '-DUSE_BFD=TRUE',
                  '-DBFD_ROOT=%s' % spec['binutils'].prefix,
                  '-DUSE_ACTIVEHARMONY=TRUE',
                  '-DACTIVEHARMONY_ROOT=%s' % spec['activeharmony'].prefix,
                  '-DUSE_OMPT=TRUE',
                  '-DOMPT_ROOT=%s' % spec['ompt-openmp'].prefix,
                  '..', *std_cmake_args)
            make()
            make("install")
@@ -10,8 +10,21 @@ class Binutils(Package):
    version('2.23.2', '4f8fa651e35ef262edc01d60fb45702e')
    version('2.20.1', '2b9dc8f2b7dbd5ec5992c6e29de0b764')

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)
    variant('libiberty', default=False, description='Also install libiberty.')

    def install(self, spec, prefix):
        configure_args = [
            '--prefix=%s' % prefix,
            '--disable-dependency-tracking',
            '--enable-interwork',
            '--enable-multilib',
            '--enable-shared',
            '--enable-64-bit-bfd',
            '--enable-targets=all']

        if '+libiberty' in spec:
            configure_args.append('--enable-install-libiberty')

        configure(*configure_args)
        make()
        make("install")
@@ -14,6 +14,10 @@ class Boost(Package):
    list_url = "http://sourceforge.net/projects/boost/files/boost/"
    list_depth = 2

    version('1.59.0', '6aa9a5c6a4ca1016edd0ed1178e3cb87')
    version('1.58.0', 'b8839650e61e9c1c0a89f371dd475546')
    version('1.57.0', '1be49befbdd9a5ce9def2983ba3e7b76')
    version('1.56.0', 'a744cf167b05d72335f27c88115f211d')
    version('1.55.0', 'd6eef4b4cacb2183f2bf265a5a03a354')
    version('1.54.0', '15cb8c0803064faef0c4ddf5bc5ca279')
    version('1.53.0', 'a00d22605d5dbcfb4c9936a9b35bc4c2')
16
var/spack/packages/bowtie2/bowtie2-2.5.patch
Normal file
@@ -0,0 +1,16 @@
--- Makefile	2015-02-26 10:50:00.000000000 -0800
+++ Makefile.new	2015-07-29 18:03:59.891357399 -0700
@@ -22,10 +22,10 @@
 #

 INC =
-GCC_PREFIX = $(shell dirname `which gcc`)
+GCC_PREFIX =
 GCC_SUFFIX =
-CC = $(GCC_PREFIX)/gcc$(GCC_SUFFIX)
-CPP = $(GCC_PREFIX)/g++$(GCC_SUFFIX)
+CC = cc
+CPP = c++
 CXX = $(CPP)
 HEADERS = $(wildcard *.h)
 BOWTIE_MM = 1
24
var/spack/packages/bowtie2/package.py
Normal file
@@ -0,0 +1,24 @@
from spack import *
from glob import glob

class Bowtie2(Package):
    """Bowtie 2 is an ultrafast and memory-efficient tool for aligning
       sequencing reads to long reference sequences."""
    homepage = "bowtie-bio.sourceforge.net/bowtie2/index.shtml"
    version('2.2.5', '51fa97a862d248d7ee660efc1147c75f',
            url="http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/2.2.5/bowtie2-2.2.5-source.zip")

    patch('bowtie2-2.5.patch', when='@2.2.5', level=0)

    def install(self, spec, prefix):
        make()
        mkdirp(prefix.bin)
        # install every bowtie2* binary (align, build, inspect, and
        # their -l/-s variants) rather than listing each one.
        for bow in glob("bowtie2*"):
            install(bow, prefix.bin)
@@ -22,8 +22,10 @@ def install(self, spec, prefix):

        bzip2_exe = join_path(prefix.bin, 'bzip2')
        install('bzip2-shared', bzip2_exe)
        for libfile in glob('libbz2.so*'):
        for i, libfile in enumerate(glob('libbz2.so*')):
            install(libfile, prefix.lib)
            if i == 0:
                symlink(join_path(prefix.lib, libfile), join_path(prefix.lib, 'libbz2.so'))

        bunzip2 = join_path(prefix.bin, 'bunzip2')
        remove(bunzip2)
@@ -28,11 +28,16 @@ class Clang(Package):
    """The goal of the Clang project is to create a new C, C++,
       Objective C and Objective C++ front-end for the LLVM compiler.
    """
    homepage = "http://clang.llvm.org"
    list_url = "http://llvm.org/releases/download.html"
    homepage = 'http://clang.llvm.org'
    url = 'http://llvm.org/releases/3.7.0/cfe-3.7.0.src.tar.xz'

    depends_on("llvm")
    version('3.4.2', '87945973b7c73038871c5f849a818588', url='http://llvm.org/releases/3.4.2/cfe-3.4.2.src.tar.xz')
    depends_on('llvm@3.7.0', when='@3.7.0')
    depends_on('llvm@3.6.2', when='@3.6.2')
    depends_on('llvm@3.5.1', when='@3.5.1')

    version('3.7.0', '8f9d27335e7331cf0a4711e952f21f01', url='http://llvm.org/releases/3.7.0/cfe-3.7.0.src.tar.xz')
    version('3.6.2', 'ff862793682f714bb7862325b9c06e20', url='http://llvm.org/releases/3.6.2/cfe-3.6.2.src.tar.xz')
    version('3.5.1', '93f9532f8f7e6f1d8e5c1116907051cb', url='http://llvm.org/releases/3.5.1/cfe-3.5.1.src.tar.xz')

    def install(self, spec, prefix):
        env['CXXFLAGS'] = self.compiler.cxx11_flag
@@ -20,7 +20,9 @@ class Dbus(Package):
    version('1.8.2', 'd6f709bbec0a022a1847c7caec9d6068')

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)
        configure(
            "--prefix=%s" % prefix,
            "--disable-systemd")
        make()
        make("install")
25
var/spack/packages/doxygen/package.py
Normal file
@@ -0,0 +1,25 @@
#------------------------------------------------------------------------------
# Author: Justin Too <justin@doubleotoo.com>
# Date: September 11, 2015
#------------------------------------------------------------------------------

from spack import *

class Doxygen(Package):
    """Doxygen is the de facto standard tool for generating documentation
    from annotated C++ sources, but it also supports other popular programming
    languages such as C, Objective-C, C#, PHP, Java, Python, IDL (Corba,
    Microsoft, and UNO/OpenOffice flavors), Fortran, VHDL, Tcl, and to some
    extent D.
    """
    homepage = "http://www.stack.nl/~dimitri/doxygen/"
    url = "http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.10.src.tar.gz"

    version('1.8.10', '79767ccd986f12a0f949015efb5f058f')

    depends_on("cmake@2.8.12:")

    def install(self, spec, prefix):
        cmake('.', *std_cmake_args)

        make()
        make("install")
26
var/spack/packages/elfutils/package.py
Normal file
@@ -0,0 +1,26 @@
from spack import *

class Elfutils(Package):
    """elfutils is a collection of various binary tools such as
       eu-objdump, eu-readelf, and other utilities that allow you to
       inspect and manipulate ELF files.  Refer to Table 5.Tools Included
       in elfutils for Red Hat Developer for a complete list of binary
       tools that are distributed with the Red Hat Developer Toolset
       version of elfutils."""

    homepage = "https://fedorahosted.org/elfutils/"

    version('0.163',
            git='git://git.fedorahosted.org/git/elfutils.git',
            tag='elfutils-0.163')

    provides('elf')

    def install(self, spec, prefix):
        autoreconf = which('autoreconf')
        autoreconf('-if')

        configure('--prefix=%s' % prefix, '--enable-maintainer-mode')
        make()
        make("install")
18
var/spack/packages/fish/package.py
Normal file
@@ -0,0 +1,18 @@
from spack import *

class Fish(Package):
    """fish is a smart and user-friendly command line shell for OS X, Linux, and
       the rest of the family.
    """

    homepage = "http://fishshell.com/"
    url = "http://fishshell.com/files/2.2.0/fish-2.2.0.tar.gz"
    list_url = homepage

    version('2.2.0', 'a76339fd14ce2ec229283c53e805faac48c3e99d9e3ede9d82c0554acfc7b77a')

    def install(self, spec, prefix):
        configure('--prefix=%s' % prefix)

        make()
        make("install")
@@ -36,21 +36,25 @@ class Gcc(Package):
    list_url   = 'http://open-source-box.org/gcc/'
    list_depth = 2

+   DEPENDS_ON_ISL_PREDICATE = '@5.0:'

    version('5.2.0', 'a51bcfeb3da7dd4c623e27207ed43467')
    version('4.9.3', '6f831b4d251872736e8e9cc09746f327')
    version('4.9.2', '4df8ee253b7f3863ad0b86359cd39c43')
    version('4.9.1', 'fddf71348546af523353bd43d34919c1')
    version('4.8.5', '80d2c2982a3392bb0b89673ff136e223')
    version('4.8.4', '5a84a30839b2aca22a2d723de2a626ec')
    version('4.7.4', '4c696da46297de6ae77a82797d2abe28')
    version('4.6.4', 'b407a3d1480c11667f293bfb1f17d1a4')
    version('4.5.4', '27e459c2566b8209ab064570e1b378f7')

    depends_on("mpfr")
    depends_on("gmp")
    depends_on("mpc")     # when @4.5:
    depends_on("libelf")
-   depends_on("binutils")
+   depends_on("binutils~libiberty")

    # Save these until we can do optional deps.
-   #depends_on("isl")
+   depends_on("isl", when=DEPENDS_ON_ISL_PREDICATE)
    #depends_on("ppl")
    #depends_on("cloog")
@@ -62,23 +66,31 @@ def install(self, spec, prefix):
        if spec.satisfies("@4.7.1:"):
            enabled_languages.add('go')

+       # Generic options to compile GCC
+       options = ["--prefix=%s" % prefix,
+                  "--libdir=%s/lib64" % prefix,
+                  "--disable-multilib",
+                  "--enable-languages=" + ','.join(enabled_languages),
+                  "--with-mpc=%s"  % spec['mpc'].prefix,
+                  "--with-mpfr=%s" % spec['mpfr'].prefix,
+                  "--with-gmp=%s"  % spec['gmp'].prefix,
+                  "--enable-lto",
+                  "--with-gnu-ld",
+                  "--with-gnu-as",
+                  "--with-quad"]
+       # Binutils
+       binutils_options = ["--with-stage1-ldflags=%s" % self.rpath_args,
+                           "--with-boot-ldflags=%s"   % self.rpath_args,
+                           "--with-ld=%s/bin/ld" % spec['binutils'].prefix,
+                           "--with-as=%s/bin/as" % spec['binutils'].prefix]
+       options.extend(binutils_options)
+       # Isl
+       if spec.satisfies(Gcc.DEPENDS_ON_ISL_PREDICATE):
+           isl_options = ["--with-isl=%s" % spec['isl'].prefix]
+           options.extend(isl_options)

        # Rest of install is straightforward.
-       configure("--prefix=%s" % prefix,
-                 "--libdir=%s/lib64" % prefix,
-                 "--disable-multilib",
-                 "--enable-languages=" + ','.join(enabled_languages),
-                 "--with-mpc=%s"  % spec['mpc'].prefix,
-                 "--with-mpfr=%s" % spec['mpfr'].prefix,
-                 "--with-gmp=%s"  % spec['gmp'].prefix,
-                 "--with-libelf=%s" % spec['libelf'].prefix,
-                 "--with-stage1-ldflags=%s" % self.rpath_args,
-                 "--with-boot-ldflags=%s"   % self.rpath_args,
-                 "--enable-lto",
-                 "--with-gnu-ld",
-                 "--with-ld=%s/bin/ld" % spec['binutils'].prefix,
-                 "--with-gnu-as",
-                 "--with-as=%s/bin/as" % spec['binutils'].prefix,
-                 "--with-quad")
+       configure(*options)
        make()
        make("install")
@@ -100,13 +112,11 @@ def write_rpath_specs(self):
            return

        gcc = Executable(join_path(self.prefix.bin, 'gcc'))
-       lines = gcc('-dumpspecs', return_output=True).split("\n")
-       for i, line in enumerate(lines):
-           if line.startswith("*link:"):
-               specs_file = join_path(self.spec_dir, 'specs')
-               with closing(open(specs_file, 'w')) as out:
-                   out.write(lines[i] + "\n")
-                   out.write("-rpath %s/lib:%s/lib64 \\\n"
-                             % (self.prefix, self.prefix))
-                   out.write(lines[i+1] + "\n")
-               set_install_permissions(specs_file)
+       lines = gcc('-dumpspecs', return_output=True).strip().split("\n")
+       specs_file = join_path(self.spec_dir, 'specs')
+       with closing(open(specs_file, 'w')) as out:
+           for line in lines:
+               out.write(line + "\n")
+               if line.startswith("*link:"):
+                   out.write("-rpath %s/lib:%s/lib64 \\\n" % (self.prefix, self.prefix))
+       set_install_permissions(specs_file)
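For context on the rewritten loop: `gcc -dumpspecs` prints a sequence of named spec sections, and the new code now copies every line through unchanged, appending an `-rpath` directive immediately after the `*link:` header so binaries linked by this gcc find its runtime libraries without LD_LIBRARY_PATH. A hypothetical excerpt of the resulting specs file (the install path and the trailing spec content are illustrative, not taken from a real run):

    *link:
    -rpath /opt/spack/gcc-5.2.0/lib:/opt/spack/gcc-5.2.0/lib64 \
    %{!static:--eh-frame-hdr} ... (the compiler's original *link spec continues here)

The trailing backslash written by the package continues the line, so the injected flags concatenate with gcc's own link spec rather than replacing it.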
21  var/spack/packages/gflags/package.py  Normal file
@@ -0,0 +1,21 @@
import os
from spack import *

class Gflags(Package):
    """The gflags package contains a C++ library that implements
    commandline flags processing. It includes built-in support for
    standard types such as string and the ability to define flags
    in the source file in which they are used. Online documentation
    available at: https://gflags.github.io/gflags/"""

    homepage = "https://gflags.github.io/gflags"
    url      = "https://github.com/gflags/gflags/archive/v2.1.2.tar.gz"

    version('2.1.2', 'ac432de923f9de1e9780b5254884599f')

    def install(self, spec, prefix):
        cmake("-DCMAKE_INSTALL_PREFIX=" + prefix,
              "-DBUILD_SHARED_LIBS=ON")
        make()
        make("test")
        make("install")
17  var/spack/packages/ghostscript/package.py  Normal file
@@ -0,0 +1,17 @@
from spack import *

class Ghostscript(Package):
    """An interpreter for the PostScript language and for PDF."""
    homepage = "http://ghostscript.com/"
    url      = "http://downloads.ghostscript.com/public/ghostscript-9.16.tar.gz"

    version('9.16', '829319325bbdb83f5c81379a8f86f38f')

    parallel = False

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix, "--enable-shared")

        make()
        make("install")
19  var/spack/packages/glm/package.py  Normal file
@@ -0,0 +1,19 @@
from spack import *


class Glm(Package):
    """
    OpenGL Mathematics (GLM) is a header-only C++ mathematics library for
    graphics software based on the OpenGL Shading Language (GLSL)
    specification.
    """

    homepage = "https://github.com/g-truc/glm"
    url      = "https://github.com/g-truc/glm/archive/0.9.7.1.tar.gz"

    version('0.9.7.1', '61af6639cdf652d1cdd7117190afced8')

    def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            cmake('..', *std_cmake_args)
            make()
            make("install")
15  var/spack/packages/glog/package.py  Normal file
@@ -0,0 +1,15 @@
import os
from spack import *

class Glog(Package):
    """C++ implementation of the Google logging module."""

    homepage = "https://github.com/google/glog"
    url      = "https://github.com/google/glog/archive/v0.3.3.tar.gz"

    version('0.3.3', 'c1f86af27bd9c73186730aa957607ed0')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix)
        make()
        make("install")
21  var/spack/packages/graphviz/package.py  Normal file
@@ -0,0 +1,21 @@
from spack import *

class Graphviz(Package):
    """Graph Visualization Software"""
    homepage = "http://www.graphviz.org"
    url      = "http://www.graphviz.org/pub/graphviz/stable/SOURCES/graphviz-2.38.0.tar.gz"

    version('2.38.0', '5b6a829b2ac94efcd5fa3c223ed6d3ae')

    parallel = False

    depends_on("swig")
    depends_on("python")
    depends_on("ghostscript")

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)

        make()
        make("install")
@@ -10,7 +10,8 @@ class Hdf5(Package):
    url      = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
    list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
    list_depth = 3

+   version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
    version('1.8.13', 'c03426e9e77d7766944654280b467289')

    depends_on("mpi")
29  var/spack/packages/leveldb/package.py  Normal file
@@ -0,0 +1,29 @@
import os
import glob
from spack import *

class Leveldb(Package):
    """LevelDB is a fast key-value storage library written at Google
    that provides an ordered mapping from string keys to string values."""

    homepage = "https://github.com/google/leveldb"
    url      = "https://github.com/google/leveldb/archive/v1.18.tar.gz"

    version('1.18', '73770de34a2a5ab34498d2e05b2b7fa0')

    depends_on("snappy")

    def install(self, spec, prefix):
        make()

        mkdirp(prefix.include)
        mkdirp(prefix.lib)

        cp = which('cp')

        # cp --preserve=links libleveldb.* prefix/lib
        args = glob.glob('libleveldb.*')
        args.append(prefix + '/lib')
        cp('--preserve=links', *args)

        cp('-r', 'include/leveldb', prefix + '/include')
@@ -36,6 +36,8 @@ class Libelf(Package):
    version('0.8.13', '4136d7b4c04df68b686570afa26988ac')
    version('0.8.12', 'e21f8273d9f5f6d43a59878dc274fec7')

+   provides('elf')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix,
                  "--enable-shared",
@@ -6,11 +6,12 @@ class Libffi(Package):
    to call any function specified by a call interface description at
    run time."""
    homepage = "https://sourceware.org/libffi/"
-   url      = "ftp://sourceware.org/pub/libffi/libffi-3.1.tar.gz"
-   version('3.1', 'f5898b29bbfd70502831a212d9249d10')
+   version('3.2.1', '83b89587607e3eb65c70d361f13bab43',
+           url="ftp://sourceware.org/pub/libffi/libffi-3.2.1.tar.gz")
+   #version('3.1', 'f5898b29bbfd70502831a212d9249d10',
+   #        url="ftp://sourceware.org/pub/libffi/libffi-3.1.tar.gz")  # Has a bug: $(lib64) instead of ${lib64} in libffi.pc

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)
        make()
        make("install")
@@ -10,7 +10,7 @@ class Libxcb(Package):
    url      = "http://xcb.freedesktop.org/dist/libxcb-1.11.tar.gz"

-   version('1.11', '1698dd837d7e6e94d029dbe8b3a82deb')
+   version('1.11.1', '118623c15a96b08622603a71d8789bf3')
    depends_on("python")
    depends_on("xcb-proto")
@@ -24,6 +24,7 @@
##############################################################################
from spack import *


class Llvm(Package):
    """The LLVM Project is a collection of modular and reusable compiler and
    toolchain technologies. Despite its name, LLVM has little to do with
@@ -31,14 +32,14 @@ class Llvm(Package):
    that can be used to build them. The name "LLVM" itself is not an acronym;
    it is the full name of the project.
    """
-   homepage = "http://llvm.org/"
-   list_url = "http://llvm.org/releases/download.html"
+   homepage = 'http://llvm.org/'
+   url      = 'http://llvm.org/releases/3.7.0/llvm-3.7.0.src.tar.xz'

    version('3.7.0', 'b98b9495e5655a672d6cb83e1a180f8e', url='http://llvm.org/releases/3.7.0/llvm-3.7.0.src.tar.xz')
    version('3.6.2', '0c1ee3597d75280dee603bae9cbf5cc2', url='http://llvm.org/releases/3.6.2/llvm-3.6.2.src.tar.xz')
    version('3.5.1', '2d3d8004f38852aa679e5945b8ce0b14', url='http://llvm.org/releases/3.5.1/llvm-3.5.1.src.tar.xz')
    version('3.4.2', 'a20669f75967440de949ac3b1bad439c', url='http://llvm.org/releases/3.4.2/llvm-3.4.2.src.tar.gz')
    version('3.0', 'a8e5f5f1c1adebae7b4a654c376a6005', url='http://llvm.org/releases/3.0/llvm-3.0.tar.gz')
    version('2.9', '793138412d2af2c7c7f54615f8943771', url='http://llvm.org/releases/2.9/llvm-2.9.tgz')
    version('2.8', '220d361b4d17051ff4bb21c64abe05ba', url='http://llvm.org/releases/2.8/llvm-2.8.tgz')

+   depends_on('python@2.7:')

    def install(self, spec, prefix):
        env['CXXFLAGS'] = self.compiler.cxx11_flag
@@ -46,9 +47,7 @@ def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            cmake('..',
                  '-DLLVM_REQUIRES_RTTI=1',
-                 '-DPYTHON_EXECUTABLE=/usr/bin/python',
-                 '-DPYTHON_INCLUDE_DIR=/usr/include/python2.6',
-                 '-DPYTHON_LIBRARY=/usr/lib64/libpython2.6.so',
+                 '-DPYTHON_EXECUTABLE=%s/bin/python' % spec['python'].prefix,
                  *std_cmake_args)
            make()
            make("install")
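One detail worth noting in the hunk above: the three hard-coded system Python paths are collapsed into a single flag derived from the concretized spec, so the build no longer assumes /usr/bin/python or a python2.6 installation on the host. With the new depends_on('python@2.7:'), the generated CMake cache entry would come out roughly like this (the install path is illustrative, not from a real run):

    -DPYTHON_EXECUTABLE=/opt/spack/python-2.7.10/bin/python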
39  var/spack/packages/lmdb/package.py  Normal file
@@ -0,0 +1,39 @@
import os
from spack import *

class Lmdb(Package):
    """Read-only mirror of official repo on openldap.org. Issues and
    pull requests here are ignored. Use OpenLDAP ITS for issues.
    http://www.openldap.org/software/repo.html"""

    homepage = "http://www.openldap.org/software/repo.html"
    url      = "https://github.com/LMDB/lmdb/archive/LMDB_0.9.16.tar.gz"

    version('0.9.16', '0de89730b8f3f5711c2b3a4ba517b648')

    def install(self, spec, prefix):
        os.chdir('libraries/liblmdb')

        make()

        mkdirp(prefix.bin)
        mkdirp(prefix + '/man/man1')
        mkdirp(prefix.lib)
        mkdirp(prefix.include)

        bins = ['mdb_stat', 'mdb_copy', 'mdb_dump', 'mdb_load']
        for f in bins:
            install(f, prefix.bin)

        mans = ['mdb_stat.1', 'mdb_copy.1', 'mdb_dump.1', 'mdb_load.1']
        for f in mans:
            install(f, prefix + '/man/man1')

        libs = ['liblmdb.a', 'liblmdb.so']
        for f in libs:
            install(f, prefix.lib)

        includes = ['lmdb.h']
        for f in includes:
            install(f, prefix.include)
15  var/spack/packages/matio/package.py  Normal file
@@ -0,0 +1,15 @@
from spack import *


class Matio(Package):
    """matio is a C library for reading and writing MATLAB MAT files."""
    homepage = "http://sourceforge.net/projects/matio/"
    url      = "http://downloads.sourceforge.net/project/matio/matio/1.5.2/matio-1.5.2.tar.gz"

    version('1.5.2', '85b007b99916c63791f28398f6a4c6f1')

    def install(self, spec, prefix):
        configure('--prefix=%s' % prefix)

        make()
        make("install")
@@ -33,11 +33,15 @@ class Mpich(Package):
    list_url   = "http://www.mpich.org/static/downloads/"
    list_depth = 2

+   version('3.1.4', '2ab544607986486562e076b83937bba2')
+   version('3.1.3', '93cb17f91ac758cbf9174ecb03563778')
+   version('3.1.2', '7fbf4b81dcb74b07ae85939d1ceee7f1')
+   version('3.1.1', '40dc408b1e03cc36d80209baaa2d32b7')
    version('3.1', '5643dd176499bfb7d25079aaff25f2ec')
    version('3.0.4', '9c5d5d4fe1e17dd12153f40bc5b6dbc0')

-   provides('mpi@:3', when='@3:')
-   provides('mpi@:1', when='@1:')
+   provides('mpi@:3.0', when='@3:')
+   provides('mpi@:1.3', when='@1:')

    def setup_dependent_environment(self, module, spec, dep_spec):
        """For dependencies, make mpicc's use spack wrapper."""
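The tightened `provides` ranges matter on the consuming side: a dependent that asks for a specific MPI standard version only matches providers whose advertised range covers it. A minimal sketch of a hypothetical dependent package (the package, URLs, and checksum are illustrative, not part of this commit):

    from spack import *

    class MyMpiApp(Package):
        """Hypothetical dependent, shown only to illustrate versioned virtual deps."""
        homepage = "http://example.com/my-mpi-app"
        url      = "http://example.com/my-mpi-app-1.0.tar.gz"

        version('1.0', '0123456789abcdef0123456789abcdef')  # placeholder checksum

        # Ask for the virtual 'mpi' package at version 3.0 or newer.  mpich@3:
        # can satisfy this because provides('mpi@:3.0', when='@3:') advertises
        # MPI standard versions up to 3.0 for those releases.
        depends_on('mpi@3.0:')

        def install(self, spec, prefix):
            configure("--prefix=%s" % prefix)
            make()
            make("install")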
28  var/spack/packages/ncdu/package.py  Normal file
@@ -0,0 +1,28 @@
from spack import *

class Ncdu(Package):
    """
    Ncdu is a disk usage analyzer with an ncurses interface. It is designed
    to find space hogs on a remote server where you don't have an entire
    graphical setup available, but it is a useful tool even on regular desktop
    systems. Ncdu aims to be fast, simple and easy to use, and should be able
    to run in any minimal POSIX-like environment with ncurses installed.
    """

    homepage = "http://dev.yorhel.nl/ncdu"
    url      = "http://dev.yorhel.nl/download/ncdu-1.11.tar.gz"

    version('1.11', '9e44240a5356b029f05f0e70a63c4d12')
    version('1.10', '7535decc8d54eca811493e82d4bfab2d')
    version('1.9' , '93258079db897d28bb8890e2db89b1fb')
    version('1.8' , '94d7a821f8a0d7ba8ef3dd926226f7d5')
    version('1.7' , '172047c29d232724cc62e773e82e592a')

    depends_on("ncurses")

    def install(self, spec, prefix):
        configure('--prefix=%s' % prefix,
                  '--with-ncurses=%s' % spec['ncurses'])

        make()
        make("install")
@@ -11,6 +11,8 @@ class Ncurses(Package):

    version('5.9', '8cb9c412e5f2d96bc6f459aa8c6282a1',
            url='http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz')
+   version('6.0', 'ee13d052e1ead260d7c28071f46eefb1',
+           url='http://ftp.gnu.org/pub/gnu/ncurses/ncurses-6.0.tar.gz')

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix,
25  var/spack/packages/netcdf/netcdf-4.3.3-mpi.patch  Normal file
@@ -0,0 +1,25 @@
diff -Nur netcdf-4.3.3/CMakeLists.txt netcdf-4.3.3.mpi/CMakeLists.txt
--- netcdf-4.3.3/CMakeLists.txt	2015-02-12 16:44:35.000000000 -0500
+++ netcdf-4.3.3.mpi/CMakeLists.txt	2015-10-14 16:44:41.176300658 -0400
@@ -753,6 +753,7 @@
     SET(USE_PARALLEL OFF CACHE BOOL "")
     MESSAGE(STATUS "Cannot find HDF5 library built with parallel support. Disabling parallel build.")
   ELSE()
+    FIND_PACKAGE(MPI REQUIRED)
     SET(USE_PARALLEL ON CACHE BOOL "")
     SET(STATUS_PARALLEL "ON")
   ENDIF()
diff -Nur netcdf-4.3.3/liblib/CMakeLists.txt netcdf-4.3.3.mpi/liblib/CMakeLists.txt
--- netcdf-4.3.3/liblib/CMakeLists.txt	2015-02-12 16:44:35.000000000 -0500
+++ netcdf-4.3.3.mpi/liblib/CMakeLists.txt	2015-10-14 16:44:57.757793634 -0400
@@ -71,6 +71,10 @@
   SET(TLL_LIBS ${TLL_LIBS} ${CURL_LIBRARY})
 ENDIF()

+IF(USE_PARALLEL)
+  SET(TLL_LIBS ${TLL_LIBS} ${MPI_C_LIBRARIES})
+ENDIF()
+
 IF(USE_HDF4)
   SET(TLL_LIBS ${TLL_LIBS} ${HDF4_LIBRARIES})
 ENDIF()
@@ -1,28 +1,27 @@
from spack import *

class Netcdf(Package):
    """NetCDF is a set of software libraries and self-describing, machine-independent
    data formats that support the creation, access, and sharing of array-oriented
    scientific data."""

    homepage = "http://www.unidata.ucar.edu/software/netcdf/"
    url      = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"

    version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')

+   patch('netcdf-4.3.3-mpi.patch')

    # Dependencies:
    # >HDF5
    depends_on("hdf5")

-   def install(self, spec, prefix):
-       configure(
-           "--prefix=%s" % prefix,
-           "--disable-dap",      # Disable DAP.
-           "--disable-shared",   # Don't build shared libraries (use static libs).
-           "CPPFLAGS=-I%s/include" % spec['hdf5'].prefix,  # Link HDF5's include dir.
-           "LDFLAGS=-L%s/lib"      % spec['hdf5'].prefix)  # Link HDF5's lib dir.
-
-       make("install")
+   def install(self, spec, prefix):
+       with working_dir('spack-build', create=True):
+           cmake('..',
+                 "-DCMAKE_INSTALL_PREFIX:PATH=%s" % prefix,
+                 "-DENABLE_DAP:BOOL=OFF",        # Disable DAP.
+                 "-DBUILD_SHARED_LIBS:BOOL=OFF") # Don't build shared libraries (use static libs).
+
+           # Check the newly installed netcdf package. Currently disabled.
+           # make("check")
+           make()
+           make("install")
@@ -9,6 +9,8 @@ class NetlibBlas(Package):

    version('3.5.0', 'b1d3e3e425b2e44a06760ff173104bdf')

+   variant('fpic', default=False, description="Build with -fpic compiler option")

    # virtual dependency
    provides('blas')

@@ -23,6 +25,10 @@ def patch(self):
        mf.filter('^LOADER.*', 'LOADER = f90')
        mf.filter('^CC =.*', 'CC = cc')

+       if '+fpic' in self.spec:
+           mf.filter('^OPTS.*=.*', 'OPTS = -O2 -frecursive -fpic')
+           mf.filter('^CFLAGS =.*', 'CFLAGS = -O3 -fpic')

    def install(self, spec, prefix):
        make('blaslib')
@@ -1,6 +1,6 @@
from spack import *

-class Lapack(Package):
+class NetlibLapack(Package):
    """
    LAPACK version 3.X is a comprehensive FORTRAN library that does
    linear algebra operations including matrix inversions, least
@@ -18,9 +18,16 @@ class Lapack(Package):
    version('3.4.0', '02d5706ec03ba885fc246e5fa10d8c70')
    version('3.3.1', 'd0d533ec9a5b74933c2a1e84eedc58b4')

+   variant('shared', default=False, description="Build shared library version")

    # virtual dependency
    provides('lapack')

    # blas is a virtual dependency.
    depends_on('blas')

+   depends_on('cmake')

    # Doesn't always build correctly in parallel
    parallel = False

@@ -39,7 +46,14 @@ def get_blas_libs(self):

    def install(self, spec, prefix):
        blas_libs = ";".join(self.get_blas_libs())
-       cmake(".", '-DBLAS_LIBRARIES=' + blas_libs, *std_cmake_args)
+       cmake_args = [".", '-DBLAS_LIBRARIES=' + blas_libs]
+
+       if '+shared' in spec:
+           cmake_args.append('-DBUILD_SHARED_LIBS=ON')
+
+       cmake_args += std_cmake_args
+
+       cmake(*cmake_args)
        make()
        make("install")
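The new `shared` variant (like `fpic` on netlib-blas just above) becomes part of the package's spec, so dependents can require it. The py-numpy hunk later in this diff consumes both; the dependent side looks like:

    depends_on('netlib-blas+fpic')      # require netlib-blas built with -fpic
    depends_on('netlib-lapack+shared')  # require the shared-library LAPACK build added here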
23  var/spack/packages/ompt-openmp/package.py  Normal file
@@ -0,0 +1,23 @@
from spack import *

class OmptOpenmp(Package):
    """LLVM/Clang OpenMP runtime with OMPT support. This is a fork of the
    OpenMPToolsInterface/LLVM-openmp fork of the official LLVM OpenMP
    mirror, and provides a drop-in replacement of the OpenMP runtimes
    for GCC, Intel, and LLVM/Clang."""
    homepage = "https://github.com/OpenMPToolsInterface/LLVM-openmp"
    url      = "http://github.com/khuck/LLVM-openmp/archive/v0.1-spack.tar.gz"

    version('spack', '35227b2726e377faa433fc841226e036')

    # depends_on("foo")

    def install(self, spec, prefix):
        with working_dir("runtime/build", create=True):

            # FIXME: Modify the configure line to suit your build system here.
            cmake('-DCMAKE_C_COMPILER=%s' % self.compiler.cc,
                  '-DCMAKE_CXX_COMPILER=%s' % self.compiler.cxx,
                  '-DCMAKE_INSTALL_PREFIX=%s' % prefix,
                  '..', *std_cmake_args)

            # FIXME: Add logic to build and install here
            make()
            make("install")
@@ -1,5 +1,8 @@
+import os
+
from spack import *


class Openmpi(Package):
    """Open MPI is a project combining technologies and resources from
    several other projects (FT-MPI, LA-MPI, LAM/MPI, and PACX-MPI)
@@ -11,15 +14,28 @@ class Openmpi(Package):

    homepage = "http://www.open-mpi.org"

-   version('1.8.2', 'ab538ed8e328079d566fc797792e016e',
-           url='http://www.open-mpi.org/software/ompi/v1.8/downloads/openmpi-1.8.2.tar.gz')
+   version('1.10.0', '280cf952de68369cebaca886c5ce0304',
+           url="http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.0.tar.bz2")
+   version('1.8.8', '0dab8e602372da1425e9242ae37faf8c',
+           url='http://www.open-mpi.org/software/ompi/v1.8/downloads/openmpi-1.8.8.tar.bz2')
+   version('1.6.5', '03aed2a4aa4d0b27196962a2a65fc475',
+           url="http://www.open-mpi.org/software/ompi/v1.6/downloads/openmpi-1.6.5.tar.bz2")

    patch('ad_lustre_rwcontig_open_source.patch', when="@1.6.5")
    patch('llnl-platforms.patch', when="@1.6.5")

-   provides('mpi@:2')
+   provides('mpi@:2.2', when='@1.6.5')   # Open MPI 1.6.5 supports MPI-2.2
+   provides('mpi@:3.0', when='@1.8.8')   # Open MPI 1.8.8 supports MPI-3.0
+   provides('mpi@:3.0', when='@1.10.0')  # Open MPI 1.10.0 supports MPI-3.0

    def setup_dependent_environment(self, module, spec, dep_spec):
        """For dependencies, make mpicc's use spack wrapper."""
        os.environ['OMPI_CC']  = 'cc'
        os.environ['OMPI_CXX'] = 'c++'
        os.environ['OMPI_FC']  = 'f90'
        os.environ['OMPI_F77'] = 'f77'

    def install(self, spec, prefix):
        config_args = ["--prefix=%s" % prefix]
@@ -39,3 +55,55 @@ def install(self, spec, prefix):
        configure(*config_args)
        make()
        make("install")

        self.filter_compilers()


    def filter_compilers(self):
        """Run after install to make the MPI compilers use the
        compilers that Spack built the package with.

        If this isn't done, they'll have CC, CXX and FC set
        to Spack's generic cc, c++ and f90. We want them to
        be bound to whatever compiler they were built with.
        """
        kwargs = { 'ignore_absent' : True, 'backup' : False, 'string' : False }
        dir = os.path.join(self.prefix, 'share/openmpi/')

        cc_wrappers = ['mpicc-vt-wrapper-data.txt', 'mpicc-wrapper-data.txt',
                       'ortecc-wrapper-data.txt', 'shmemcc-wrapper-data.txt']

        cxx_wrappers = ['mpic++-vt-wrapper-data.txt', 'mpic++-wrapper-data.txt',
                        'ortec++-wrapper-data.txt']

        fc_wrappers = ['mpifort-vt-wrapper-data.txt',
                       'mpifort-wrapper-data.txt', 'shmemfort-wrapper-data.txt']

        for wrapper in cc_wrappers:
            filter_file('compiler=.*', 'compiler=%s' % self.compiler.cc,
                        os.path.join(dir, wrapper), **kwargs)

        for wrapper in cxx_wrappers:
            filter_file('compiler=.*', 'compiler=%s' % self.compiler.cxx,
                        os.path.join(dir, wrapper), **kwargs)

        for wrapper in fc_wrappers:
            filter_file('compiler=.*', 'compiler=%s' % self.compiler.fc,
                        os.path.join(dir, wrapper), **kwargs)

        # These are symlinks in newer versions, so check that here
        f77_wrappers = ['mpif77-vt-wrapper-data.txt', 'mpif77-wrapper-data.txt']
        f90_wrappers = ['mpif90-vt-wrapper-data.txt', 'mpif90-wrapper-data.txt']

        for wrapper in f77_wrappers:
            path = os.path.join(dir, wrapper)
            if not os.path.islink(path):
                filter_file('compiler=.*', 'compiler=%s' % self.compiler.f77,
                            path, **kwargs)
        for wrapper in f90_wrappers:
            path = os.path.join(dir, wrapper)
            if not os.path.islink(path):
                filter_file('compiler=.*', 'compiler=%s' % self.compiler.fc,
                            path, **kwargs)
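To make the `filter_compilers` step concrete: each `*-wrapper-data.txt` file is a key=value configuration that Open MPI's compiler wrappers read at runtime. Before filtering, the wrapper would point back at Spack's generic compiler shim; afterwards it names the real compiler the package was built with. A hypothetical single-line before/after (the compiler path is illustrative):

    compiler=cc              # before: Spack's generic wrapper name
    compiler=/usr/bin/gcc    # after:  filter_file('compiler=.*', ...) rewrote the line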
72  var/spack/packages/paraview/package.py  Normal file
@@ -0,0 +1,72 @@
from spack import *

class Paraview(Package):
    homepage = 'http://www.paraview.org'
    url      = 'http://www.paraview.org/files/v4.4/ParaView-v4.4.0-source.tar.gz'

    version('4.4.0', 'fa1569857dd680ebb4d7ff89c2227378', url='http://www.paraview.org/files/v4.4/ParaView-v4.4.0-source.tar.gz')

    variant('python', default=False, description='Enable Python support')
    variant('matplotlib', default=False, description='Enable Matplotlib support')
    variant('numpy', default=False, description='Enable NumPy support')

    variant('tcl', default=False, description='Enable TCL support')

    variant('mpi', default=False, description='Enable MPI support')

    variant('osmesa', default=False, description='Enable OSMesa support')
    variant('qt', default=False, description='Enable Qt support')

    depends_on('python', when='+python')
    depends_on('py-numpy', when='+python+numpy')
    depends_on('py-matplotlib', when='+python+matplotlib')
    depends_on('tcl', when='+tcl')
    depends_on('mpi', when='+mpi')
    depends_on('qt', when='+qt')

    depends_on('bzip2')
    depends_on('freetype')
    depends_on('hdf5')    # drags in mpi
    depends_on('jpeg')
    depends_on('libpng')
    depends_on('libtiff')
    #depends_on('libxml2')   # drags in python
    depends_on('netcdf')
    #depends_on('protobuf')  # version mismatches?
    #depends_on('sqlite')    # external version not supported
    depends_on('zlib')

    def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            def feature_to_bool(feature, on='ON', off='OFF'):
                if feature in spec:
                    return on
                return off

            def nfeature_to_bool(feature):
                return feature_to_bool(feature, on='OFF', off='ON')

            feature_args = std_cmake_args[:]
            feature_args.append('-DPARAVIEW_BUILD_QT_GUI:BOOL=%s' % feature_to_bool('+qt'))
            feature_args.append('-DPARAVIEW_ENABLE_PYTHON:BOOL=%s' % feature_to_bool('+python'))
            feature_args.append('-DPARAVIEW_USE_MPI:BOOL=%s' % feature_to_bool('+mpi'))
            feature_args.append('-DVTK_ENABLE_TCL_WRAPPING:BOOL=%s' % feature_to_bool('+tcl'))
            feature_args.append('-DVTK_OPENGL_HAS_OSMESA:BOOL=%s' % feature_to_bool('+osmesa'))
            feature_args.append('-DVTK_USE_X:BOOL=%s' % nfeature_to_bool('+osmesa'))
            feature_args.append('-DVTK_RENDERING_BACKEND:STRING=%s' % feature_to_bool('+opengl2', 'OpenGL2', 'OpenGL'))

            feature_args.extend(std_cmake_args)

            cmake('..',
                  '-DCMAKE_INSTALL_PREFIX:PATH=%s' % prefix,
                  '-DBUILD_TESTING:BOOL=OFF',
                  '-DVTK_USER_SYSTEM_FREETYPE:BOOL=ON',
                  '-DVTK_USER_SYSTEM_HDF5:BOOL=ON',
                  '-DVTK_USER_SYSTEM_JPEG:BOOL=ON',
                  #'-DVTK_USER_SYSTEM_LIBXML2:BOOL=ON',
                  '-DVTK_USER_SYSTEM_NETCDF:BOOL=ON',
                  '-DVTK_USER_SYSTEM_TIFF:BOOL=ON',
                  '-DVTK_USER_SYSTEM_ZLIB:BOOL=ON',
                  *feature_args)
            make()
            make('install')
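A quick illustration of the `feature_to_bool` helper above: it only tests whether a variant token is in the spec. Assuming a spec concretized as, say, `paraview+python~qt` (hypothetical), the calls would evaluate as:

    feature_to_bool('+python')                        # -> 'ON'  (variant enabled)
    feature_to_bool('+qt')                            # -> 'OFF' (variant disabled)
    feature_to_bool('+opengl2', 'OpenGL2', 'OpenGL')  # -> 'OpenGL'; no opengl2 variant
                                                      #    is declared in this file, so this
                                                      #    would appear to always take the default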
21  var/spack/packages/pidx/package.py  Normal file
@@ -0,0 +1,21 @@
from spack import *

class Pidx(Package):
    """PIDX Parallel I/O Library.

    PIDX is an efficient parallel I/O library that reads and writes
    multiresolution IDX data files.
    """

    homepage = "http://www.cedmav.com/pidx"

    version('1.0', git='https://github.com/sci-visus/PIDX.git',
            commit='6afa1cf71d1c41263296dc049c8fabaf73c296da')

    depends_on("mpi")

    def install(self, spec, prefix):
        with working_dir('spack-build', create=True):
            cmake('..', *std_cmake_args)
            make()
            make("install")
17  var/spack/packages/pkg-config/package.py  Normal file
@@ -0,0 +1,17 @@
from spack import *

class PkgConfig(Package):
    """pkg-config is a helper tool used when compiling applications and libraries."""
    homepage = "http://www.freedesktop.org/wiki/Software/pkg-config/"
    url      = "http://pkgconfig.freedesktop.org/releases/pkg-config-0.28.tar.gz"

    version('0.28', 'aa3c86e67551adc3ac865160e34a2a0d')

    parallel = False

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix, "--enable-shared")

        make()
        make("install")
16  var/spack/packages/protobuf/package.py  Normal file
@@ -0,0 +1,16 @@
import os
from spack import *

class Protobuf(Package):
    """Google's data interchange format."""

    homepage = "https://developers.google.com/protocol-buffers"
    url      = "https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.bz2"

    version('2.5.0', 'a72001a9067a4c2c4e0e836d0f92ece4')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix)
        make()
        make("check")
        make("install")
@@ -7,9 +7,22 @@ class PyNumpy(Package):

-   version('1.9.1', '78842b73560ec378142665e712ae4ad9')
+   version('1.9.2', 'a1ed53432dbcd256398898d35bc8e645')

    extends('python')
    depends_on('py-nose')
+   depends_on('netlib-blas+fpic')
+   depends_on('netlib-lapack+shared')

+   def patch(self):
+       filter_file(
+           "possible_executables = \['(gfortran|g77|ifort|efl)",
+           "possible_executables = ['fc",
+           "numpy/distutils/fcompiler/gnu.py",
+           "numpy/distutils/fcompiler/intel.py")

    def install(self, spec, prefix):
+       with open('site.cfg', 'w') as f:
+           f.write('[DEFAULT]\n')
+           f.write('libraries=lapack,blas\n')
+           f.write('library_dirs=%s/lib:%s/lib\n' % (spec['blas'].prefix, spec['lapack'].prefix))
        python('setup.py', 'install', '--prefix=%s' % prefix)
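The `site.cfg` written above is NumPy's standard build-configuration file; pointing `library_dirs` at the concretized blas and lapack prefixes is what ties the two variant-qualified `depends_on` lines to the actual link line. For a concrete spec the generated file would read roughly like this (install paths are illustrative):

    [DEFAULT]
    libraries=lapack,blas
    library_dirs=/opt/spack/netlib-blas-3.5.0/lib:/opt/spack/netlib-lapack-3.5.2/lib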
@@ -11,6 +11,8 @@ class PyScipy(Package):
    extends('python')
    depends_on('py-nose')
    depends_on('py-numpy')
+   depends_on('blas')
+   depends_on('lapack')

    def install(self, spec, prefix):
        python('setup.py', 'install', '--prefix=%s' % prefix)
13  var/spack/packages/py-sphinx/package.py  Normal file
@@ -0,0 +1,13 @@
from spack import *

class PySphinx(Package):
    """Sphinx Documentation Generator."""
    homepage = "http://sphinx-doc.org"
    url      = "https://pypi.python.org/packages/source/S/Sphinx/Sphinx-1.3.1.tar.gz"

    version('1.3.1', '8786a194acf9673464c5455b11fd4332')

    extends('python')

    def install(self, spec, prefix):
        python('setup.py', 'install', '--prefix=%s' % prefix)
18  var/spack/packages/samtools/package.py  Normal file
@@ -0,0 +1,18 @@
from spack import *

class Samtools(Package):
    """SAM Tools provide various utilities for manipulating alignments in
    the SAM format, including sorting, merging, indexing and generating
    alignments in a per-position format."""

    homepage = "www.htslib.org"
    version('1.2', '988ec4c3058a6ceda36503eebecd4122',
            url="https://github.com/samtools/samtools/releases/download/1.2/samtools-1.2.tar.bz2")

    depends_on("zlib")
    depends_on("mpc")
    parallel = False
    patch("samtools1.2.patch", level=0)

    def install(self, spec, prefix):
        make("prefix=%s" % prefix, "install")
20  var/spack/packages/samtools/samtools1.2.patch  Normal file
@@ -0,0 +1,20 @@
--- Makefile	2015-02-03 08:27:34.000000000 -0800
+++ Makefile.new	2015-07-21 10:38:27.881406892 -0700
@@ -26,7 +26,7 @@
 CFLAGS = -g -Wall -O2
 LDFLAGS =
 LDLIBS =
-DFLAGS= -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_CURSES_LIB=1
+DFLAGS= -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_CURSES_LIB=0
 LOBJS= bam_aux.o bam.o bam_import.o sam.o \
 	sam_header.o bam_plbuf.o
 AOBJS= bam_index.o bam_plcmd.o sam_view.o \
@@ -37,7 +37,7 @@
 	faidx.o stats.o stats_isize.o bam_flags.o bam_split.o \
 	bam_tview.o bam_tview_curses.o bam_tview_html.o bam_lpileup.o
 INCLUDES= -I. -I$(HTSDIR)
-LIBCURSES= -lcurses # -lXCurses
+#LIBCURSES= -lcurses # -lXCurses

 prefix = /usr/local
 exec_prefix = $(prefix)
@@ -1,11 +1,10 @@
# FIXME: Add copyright statement

from spack import *
-from contextlib import closing

class Scorep(Package):
    """The Score-P measurement infrastructure is a highly scalable and
    easy-to-use tool suite for profiling, event tracing, and online
    analysis of HPC applications."""

    # FIXME: add a proper url for your package's homepage here.
@@ -20,7 +19,7 @@ class Scorep(Package):
    depends_on("mpi")
    depends_on("papi")
-   # depends_on("otf2@1.2:1.2.1") # only Score-P 1.2.x
    depends_on("otf2")
    depends_on("opari2")
    depends_on("cube@4.2:4.2.3")
@@ -53,12 +52,12 @@ def install(self, spec, prefix):
        # Use a custom compiler configuration, otherwise the score-p
        # build system messes with spack's compiler settings.
        # Create these three files in the build directory
-       with closing(open("platform-backend-user-provided", "w")) as backend_file:
+       with open("platform-backend-user-provided", "w") as backend_file:
            backend_file.write(self.backend_user_provided)
-       with closing(open("platform-frontend-user-provided", "w")) as frontend_file:
+       with open("platform-frontend-user-provided", "w") as frontend_file:
            frontend_file.write(self.frontend_user_provided)
-       with closing(open("platform-mpi-user-provided", "w")) as mpi_file:
-           mpi_file.write(self.mpi_user_provided)
+       with open("platform-mpi-user-provided", "w") as mpi_file:
+           mpi_file.write(self.mpi_user_provided)

        configure_args = ["--prefix=%s" % prefix,
                          "--with-custom-compilers",
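The `closing()` wrappers removed above were never needed here: file objects returned by `open()` have been context managers since Python 2.6, so the plain `with open(...)` form is equivalent and one import lighter. `contextlib.closing` remains useful only for objects that have a `close()` method but no context-manager protocol, for example (illustrative, Python 2):

    from contextlib import closing
    import urllib2

    # urllib2's file-like response defines close() but not __enter__/__exit__,
    # so closing() supplies the context-manager protocol around it.
    with closing(urllib2.urlopen('http://example.com')) as page:
        data = page.read()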
@@ -28,12 +28,14 @@ class Scr(Package):
    """SCR caches checkpoint data in storage on the compute nodes of a
    Linux cluster to provide a fast, scalable checkpoint/restart
    capability for MPI codes"""

    homepage = "https://computation.llnl.gov/project/scr/"
    url      = "http://downloads.sourceforge.net/project/scalablecr/releases/scr-1.1-7.tar.gz"

    depends_on("mpi")
    # depends_on("dtcmp")

-   version('1.1-7', 'a5930e9ab27d1b7049447c2fd7734ebd')
+   version('1.1-7', 'a5930e9ab27d1b7049447c2fd7734ebd', url='http://downloads.sourceforge.net/project/scalablecr/releases/scr-1.1-7.tar.gz')
+   version('1.1.8', '6a0f11ad18e27fcfc00a271ff587b06e', url='https://github.com/hpc/scr/releases/download/v1.1.8/scr-1.1.8.tar.gz')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix,
15  var/spack/packages/snappy/package.py  Normal file
@@ -0,0 +1,15 @@
import os
from spack import *

class Snappy(Package):
    """A fast compressor/decompressor: https://code.google.com/p/snappy"""

    homepage = "https://code.google.com/p/snappy"
    url      = "https://github.com/google/snappy/releases/download/1.1.3/snappy-1.1.3.tar.gz"

    version('1.1.3', '7358c82f133dc77798e4c2062a749b73')

    def install(self, spec, prefix):
        configure("--prefix=" + prefix)
        make()
        make("install")
@@ -9,17 +9,20 @@ class Stat(Package):
    version('2.1.0', 'ece26beaf057aa9134d62adcdda1ba91')
    version('2.0.0', 'c7494210b0ba26b577171b92838e1a9b')

+   variant('dysect', default=False, description="enable DySectAPI")

    depends_on('libelf')
    depends_on('libdwarf')
    depends_on('dyninst')
    depends_on('graphlib')
+   depends_on('graphviz')
    depends_on('launchmon')
    depends_on('mrnet')

    patch('configure_mpicxx.patch', when='@2.1.0')

    def install(self, spec, prefix):
-       configure(
+       configure_args = [
            "--enable-gui",
            "--prefix=%s" % prefix,
            "--disable-examples", # Examples require MPI: avoid this dependency.
@@ -27,7 +30,11 @@ def install(self, spec, prefix):
            "--with-mrnet=%s" % spec['mrnet'].prefix,
            "--with-graphlib=%s" % spec['graphlib'].prefix,
            "--with-stackwalker=%s" % spec['dyninst'].prefix,
-           "--with-libdwarf=%s" % spec['libdwarf'].prefix)
+           "--with-libdwarf=%s" % spec['libdwarf'].prefix
+       ]
+       if '+dysect' in spec:
+           configure_args.append('--enable-dysectapi')
+       configure(*configure_args)

        make(parallel=False)
        make("install")
@@ -38,6 +38,8 @@ class Swig(Package):

    version('3.0.2', '62f9b0d010cef36a13a010dc530d0d41')

+   depends_on('pcre')

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)
        make()
50  var/spack/packages/trilinos/package.py  Normal file
@@ -0,0 +1,50 @@
from spack import *


class Trilinos(Package):
    """
    The Trilinos Project is an effort to develop algorithms and enabling
    technologies within an object-oriented software framework for the
    solution of large-scale, complex multi-physics engineering and
    scientific problems. A unique design feature of Trilinos is its
    focus on packages.
    """
    homepage = "https://trilinos.org/"
    url      = "http://trilinos.csbsju.edu/download/files/trilinos-12.2.1-Source.tar.gz"

    version('12.2.1', '6161926ea247863c690e927687f83be9')
    version('12.0.1', 'bd99741d047471e127b8296b2ec08017')
    version('11.14.3', '2f4f83f8333e4233c57d0f01c4b57426')
    version('11.14.2', 'a43590cf896c677890d75bfe75bc6254')
    version('11.14.1', '40febc57f76668be8b6a77b7607bb67f')

    variant('mpi', default=True, description='Add a dependency on MPI and enable MPI-dependent packages')

    # Everything should be compiled with -fpic
    depends_on('blas')
    depends_on('lapack')
    depends_on('boost')
    depends_on('netcdf')
    depends_on('matio')
    depends_on('glm')
    depends_on('swig')
    depends_on('mpi', when='+mpi')

    def install(self, spec, prefix):

        options = [
            '-DTrilinos_ENABLE_ALL_PACKAGES:BOOL=ON',
            '-DTrilinos_ENABLE_TESTS:BOOL=OFF',
            '-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
            '-DBUILD_SHARED_LIBS:BOOL=ON',
            '-DBLAS_LIBRARY_DIRS:PATH=%s' % spec['blas'].prefix,
            '-DLAPACK_LIBRARY_DIRS:PATH=%s' % spec['lapack'].prefix
        ]
        if '+mpi' in spec:
            mpi_options = ['-DTPL_ENABLE_MPI:BOOL=ON']
            options.extend(mpi_options)

        # -DCMAKE_INSTALL_PREFIX and all the likes...
        options.extend(std_cmake_args)
        with working_dir('spack-build', create=True):
            cmake('..', *options)
            make()
            make('install')
@@ -8,9 +8,13 @@ class Xz(Package):
    homepage = "http://tukaani.org/xz/"
    url      = "http://tukaani.org/xz/xz-5.2.0.tar.bz2"

-   version('5.2.0', '867cc8611760240ebf3440bd6e170bb9')
+   version('5.2.0', '867cc8611760240ebf3440bd6e170bb9',
+           url='http://tukaani.org/xz/xz-5.2.0.tar.bz2')
+   version('5.2.2', 'f90c9a0c8b259aee2234c4e0d7fd70af',
+           url='http://tukaani.org/xz/xz-5.2.2.tar.bz2')

    def install(self, spec, prefix):
        configure("--prefix=%s" % prefix)
        make()
        make("install")