maintainers: switch from list to directive (#37752)

This commit is contained in:
Tamara Dahlgren 2023-05-17 15:25:57 -07:00 committed by GitHub
parent 125c20bc06
commit dcfcc03497
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 1269 additions and 1180 deletions

View file

@ -31,194 +31,164 @@ class Amdfftw(FftwBase):
Example : spack install amdfftw precision=float Example : spack install amdfftw precision=float
""" """
_name = 'amdfftw' _name = "amdfftw"
homepage = "https://developer.amd.com/amd-aocl/fftw/" homepage = "https://developer.amd.com/amd-aocl/fftw/"
url = "https://github.com/amd/amd-fftw/archive/3.0.tar.gz" url = "https://github.com/amd/amd-fftw/archive/3.0.tar.gz"
git = "https://github.com/amd/amd-fftw.git" git = "https://github.com/amd/amd-fftw.git"
maintainers = ['amd-toolchain-support'] maintainers("amd-toolchain-support")
version('3.1', sha256='3e777f3acef13fa1910db097e818b1d0d03a6a36ef41186247c6ab1ab0afc132') version("3.1", sha256="3e777f3acef13fa1910db097e818b1d0d03a6a36ef41186247c6ab1ab0afc132")
version('3.0.1', sha256='87030c6bbb9c710f0a64f4f306ba6aa91dc4b182bb804c9022b35aef274d1a4c') version("3.0.1", sha256="87030c6bbb9c710f0a64f4f306ba6aa91dc4b182bb804c9022b35aef274d1a4c")
version('3.0', sha256='a69deaf45478a59a69f77c4f7e9872967f1cfe996592dd12beb6318f18ea0bcd') version("3.0", sha256="a69deaf45478a59a69f77c4f7e9872967f1cfe996592dd12beb6318f18ea0bcd")
version('2.2', sha256='de9d777236fb290c335860b458131678f75aa0799c641490c644c843f0e246f8') version("2.2", sha256="de9d777236fb290c335860b458131678f75aa0799c641490c644c843f0e246f8")
variant('shared', default=True, variant("shared", default=True, description="Builds a shared version of the library")
description='Builds a shared version of the library') variant("openmp", default=True, description="Enable OpenMP support")
variant('openmp', default=True, variant("threads", default=False, description="Enable SMP threads support")
description='Enable OpenMP support') variant("debug", default=False, description="Builds a debug version of the library")
variant('threads', default=False,
description='Enable SMP threads support')
variant('debug', default=False,
description='Builds a debug version of the library')
variant( variant(
'amd-fast-planner', "amd-fast-planner",
default=False, default=False,
description='Option to reduce the planning time without much' description="Option to reduce the planning time without much"
'tradeoff in the performance. It is supported for' "tradeoff in the performance. It is supported for"
'Float and double precisions only.') "Float and double precisions only.",
)
variant("amd-top-n-planner", default=False, description="Build with amd-top-n-planner support")
variant( variant(
'amd-top-n-planner', "amd-mpi-vader-limit", default=False, description="Build with amd-mpi-vader-limit support"
default=False, )
description='Build with amd-top-n-planner support') variant("static", default=False, description="Build with static suppport")
variant( variant("amd-trans", default=False, description="Build with amd-trans suppport")
'amd-mpi-vader-limit', variant("amd-app-opt", default=False, description="Build with amd-app-opt suppport")
default=False,
description='Build with amd-mpi-vader-limit support')
variant(
'static',
default=False,
description='Build with static suppport')
variant(
'amd-trans',
default=False,
description='Build with amd-trans suppport')
variant(
'amd-app-opt',
default=False,
description='Build with amd-app-opt suppport')
depends_on('texinfo') depends_on("texinfo")
provides('fftw-api@3', when='@2:') provides("fftw-api@3", when="@2:")
conflicts( conflicts(
'precision=quad', "precision=quad",
when='@2.2 %aocc', when="@2.2 %aocc",
msg='Quad precision is not supported by AOCC clang version 2.2') msg="Quad precision is not supported by AOCC clang version 2.2",
)
conflicts( conflicts(
'+debug', "+debug", when="@2.2 %aocc", msg="debug mode is not supported by AOCC clang version 2.2"
when='@2.2 %aocc', )
msg='debug mode is not supported by AOCC clang version 2.2') conflicts("%gcc@:7.2", when="@2.2:", msg="GCC version above 7.2 is required for AMDFFTW")
conflicts( conflicts(
'%gcc@:7.2', "+amd-fast-planner ", when="+mpi", msg="mpi thread is not supported with amd-fast-planner"
when='@2.2:', )
msg='GCC version above 7.2 is required for AMDFFTW')
conflicts( conflicts(
'+amd-fast-planner ', "+amd-fast-planner", when="@2.2", msg="amd-fast-planner is supported from 3.0 onwards"
when='+mpi', )
msg='mpi thread is not supported with amd-fast-planner')
conflicts( conflicts(
'+amd-fast-planner', "+amd-fast-planner",
when='@2.2', when="precision=quad",
msg='amd-fast-planner is supported from 3.0 onwards') msg="Quad precision is not supported with amd-fast-planner",
)
conflicts( conflicts(
'+amd-fast-planner', "+amd-fast-planner",
when='precision=quad', when="precision=long_double",
msg='Quad precision is not supported with amd-fast-planner') msg="long_double precision is not supported with amd-fast-planner",
)
conflicts( conflicts(
'+amd-fast-planner', "+amd-top-n-planner",
when='precision=long_double', when="@:3.0.0",
msg='long_double precision is not supported with amd-fast-planner') msg="amd-top-n-planner is supported from 3.0.1 onwards",
)
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner",
when='@:3.0.0', when="precision=long_double",
msg='amd-top-n-planner is supported from 3.0.1 onwards') msg="long_double precision is not supported with amd-top-n-planner",
)
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner",
when='precision=long_double', when="precision=quad",
msg='long_double precision is not supported with amd-top-n-planner') msg="Quad precision is not supported with amd-top-n-planner",
)
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner",
when='precision=quad', when="+amd-fast-planner",
msg='Quad precision is not supported with amd-top-n-planner') msg="amd-top-n-planner cannot be used with amd-fast-planner",
)
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner", when="+threads", msg="amd-top-n-planner works only for single thread"
when='+amd-fast-planner', )
msg='amd-top-n-planner cannot be used with amd-fast-planner')
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner", when="+mpi", msg="mpi thread is not supported with amd-top-n-planner"
when='+threads', )
msg='amd-top-n-planner works only for single thread')
conflicts( conflicts(
'+amd-top-n-planner', "+amd-top-n-planner",
when='+mpi', when="+openmp",
msg='mpi thread is not supported with amd-top-n-planner') msg="openmp thread is not supported with amd-top-n-planner",
)
conflicts( conflicts(
'+amd-top-n-planner', "+amd-mpi-vader-limit",
when='+openmp', when="@:3.0.0",
msg='openmp thread is not supported with amd-top-n-planner') msg="amd-mpi-vader-limit is supported from 3.0.1 onwards",
)
conflicts( conflicts(
'+amd-mpi-vader-limit', "+amd-mpi-vader-limit",
when='@:3.0.0', when="precision=quad",
msg='amd-mpi-vader-limit is supported from 3.0.1 onwards') msg="Quad precision is not supported with amd-mpi-vader-limit",
)
conflicts("+amd-trans", when="+threads", msg="amd-trans works only for single thread")
conflicts("+amd-trans", when="+mpi", msg="mpi thread is not supported with amd-trans")
conflicts("+amd-trans", when="+openmp", msg="openmp thread is not supported with amd-trans")
conflicts( conflicts(
'+amd-mpi-vader-limit', "+amd-trans",
when='precision=quad', when="precision=long_double",
msg='Quad precision is not supported with amd-mpi-vader-limit') msg="long_double precision is not supported with amd-trans",
)
conflicts( conflicts(
'+amd-trans', "+amd-trans", when="precision=quad", msg="Quad precision is not supported with amd-trans"
when='+threads', )
msg='amd-trans works only for single thread') conflicts("+amd-app-opt", when="@:3.0.1", msg="amd-app-opt is supported from 3.1 onwards")
conflicts("+amd-app-opt", when="+mpi", msg="mpi thread is not supported with amd-app-opt")
conflicts( conflicts(
'+amd-trans', "+amd-app-opt",
when='+mpi', when="precision=long_double",
msg='mpi thread is not supported with amd-trans') msg="long_double precision is not supported with amd-app-opt",
)
conflicts( conflicts(
'+amd-trans', "+amd-app-opt",
when='+openmp', when="precision=quad",
msg='openmp thread is not supported with amd-trans') msg="Quad precision is not supported with amd-app-opt",
conflicts( )
'+amd-trans',
when='precision=long_double',
msg='long_double precision is not supported with amd-trans')
conflicts(
'+amd-trans',
when='precision=quad',
msg='Quad precision is not supported with amd-trans')
conflicts(
'+amd-app-opt',
when='@:3.0.1',
msg='amd-app-opt is supported from 3.1 onwards')
conflicts(
'+amd-app-opt',
when='+mpi',
msg='mpi thread is not supported with amd-app-opt')
conflicts(
'+amd-app-opt',
when='precision=long_double',
msg='long_double precision is not supported with amd-app-opt')
conflicts(
'+amd-app-opt',
when='precision=quad',
msg='Quad precision is not supported with amd-app-opt')
def configure(self, spec, prefix): def configure(self, spec, prefix):
"""Configure function""" """Configure function"""
# Base options # Base options
options = [ options = ["--prefix={0}".format(prefix), "--enable-amd-opt"]
'--prefix={0}'.format(prefix),
'--enable-amd-opt'
]
# Check if compiler is AOCC # Check if compiler is AOCC
if '%aocc' in spec: if "%aocc" in spec:
options.append('CC={0}'.format(os.path.basename(spack_cc))) options.append("CC={0}".format(os.path.basename(spack_cc)))
options.append('FC={0}'.format(os.path.basename(spack_fc))) options.append("FC={0}".format(os.path.basename(spack_fc)))
options.append('F77={0}'.format(os.path.basename(spack_fc))) options.append("F77={0}".format(os.path.basename(spack_fc)))
if '+debug' in spec: if "+debug" in spec:
options.append('--enable-debug') options.append("--enable-debug")
if '+mpi' in spec: if "+mpi" in spec:
options.append('--enable-mpi') options.append("--enable-mpi")
options.append('--enable-amd-mpifft') options.append("--enable-amd-mpifft")
else: else:
options.append('--disable-mpi') options.append("--disable-mpi")
options.append('--disable-amd-mpifft') options.append("--disable-amd-mpifft")
options.extend(self.enable_or_disable('shared')) options.extend(self.enable_or_disable("shared"))
options.extend(self.enable_or_disable('openmp')) options.extend(self.enable_or_disable("openmp"))
options.extend(self.enable_or_disable('threads')) options.extend(self.enable_or_disable("threads"))
options.extend(self.enable_or_disable('amd-fast-planner')) options.extend(self.enable_or_disable("amd-fast-planner"))
options.extend(self.enable_or_disable('amd-top-n-planner')) options.extend(self.enable_or_disable("amd-top-n-planner"))
options.extend(self.enable_or_disable('amd-mpi-vader-limit')) options.extend(self.enable_or_disable("amd-mpi-vader-limit"))
options.extend(self.enable_or_disable('static')) options.extend(self.enable_or_disable("static"))
options.extend(self.enable_or_disable('amd-trans')) options.extend(self.enable_or_disable("amd-trans"))
options.extend(self.enable_or_disable('amd-app-opt')) options.extend(self.enable_or_disable("amd-app-opt"))
if not self.compiler.f77 or not self.compiler.fc: if not self.compiler.f77 or not self.compiler.fc:
options.append('--disable-fortran') options.append("--disable-fortran")
# Cross compilation is supported in amd-fftw by making use of target # Cross compilation is supported in amd-fftw by making use of target
# variable to set AMD_ARCH configure option. # variable to set AMD_ARCH configure option.
@ -226,17 +196,16 @@ class Amdfftw(FftwBase):
# use target variable to set appropriate -march option in AMD_ARCH. # use target variable to set appropriate -march option in AMD_ARCH.
arch = spec.architecture arch = spec.architecture
options.append( options.append(
'AMD_ARCH={0}'.format( "AMD_ARCH={0}".format(arch.target.optimization_flags(spec.compiler).split("=")[-1])
arch.target.optimization_flags( )
spec.compiler).split('=')[-1]))
# Specific SIMD support. # Specific SIMD support.
# float and double precisions are supported # float and double precisions are supported
simd_features = ['sse2', 'avx', 'avx2'] simd_features = ["sse2", "avx", "avx2"]
simd_options = [] simd_options = []
for feature in simd_features: for feature in simd_features:
msg = '--enable-{0}' if feature in spec.target else '--disable-{0}' msg = "--enable-{0}" if feature in spec.target else "--disable-{0}"
simd_options.append(msg.format(feature)) simd_options.append(msg.format(feature))
# When enabling configure option "--enable-amd-opt", do not use the # When enabling configure option "--enable-amd-opt", do not use the
@ -246,20 +215,19 @@ class Amdfftw(FftwBase):
# Double is the default precision, for all the others we need # Double is the default precision, for all the others we need
# to enable the corresponding option. # to enable the corresponding option.
enable_precision = { enable_precision = {
'float': ['--enable-float'], "float": ["--enable-float"],
'double': None, "double": None,
'long_double': ['--enable-long-double'], "long_double": ["--enable-long-double"],
'quad': ['--enable-quad-precision'] "quad": ["--enable-quad-precision"],
} }
# Different precisions must be configured and compiled one at a time # Different precisions must be configured and compiled one at a time
configure = Executable('../configure') configure = Executable("../configure")
for precision in self.selected_precisions: for precision in self.selected_precisions:
opts = (enable_precision[precision] or []) + options[:] opts = (enable_precision[precision] or []) + options[:]
# SIMD optimizations are available only for float and double # SIMD optimizations are available only for float and double
if precision in ('float', 'double'): if precision in ("float", "double"):
opts += simd_options opts += simd_options
with working_dir(precision, create=True): with working_dir(precision, create=True):

View file

@ -26,11 +26,11 @@ class Llvm(CMakePackage, CudaPackage):
url = "https://github.com/llvm/llvm-project/archive/llvmorg-7.1.0.tar.gz" url = "https://github.com/llvm/llvm-project/archive/llvmorg-7.1.0.tar.gz"
list_url = "https://releases.llvm.org/download.html" list_url = "https://releases.llvm.org/download.html"
git = "https://github.com/llvm/llvm-project" git = "https://github.com/llvm/llvm-project"
maintainers = ['trws', 'haampie'] maintainers("trws", "haampie")
tags = ['e4s'] tags = ["e4s"]
generator = 'Ninja' generator = "Ninja"
family = "compiler" # Used by lmod family = "compiler" # Used by lmod
@ -80,13 +80,12 @@ class Llvm(CMakePackage, CudaPackage):
# to save space, build with `build_type=Release`. # to save space, build with `build_type=Release`.
variant( variant(
"clang", "clang", default=True, description="Build the LLVM C/C++/Objective-C compiler frontend"
default=True,
description="Build the LLVM C/C++/Objective-C compiler frontend",
) )
variant( variant(
"flang", "flang",
default=False, when='@11: +clang', default=False,
when="@11: +clang",
description="Build the LLVM Fortran compiler frontend " description="Build the LLVM Fortran compiler frontend "
"(experimental - parser only, needs GCC)", "(experimental - parser only, needs GCC)",
) )
@ -95,27 +94,23 @@ class Llvm(CMakePackage, CudaPackage):
default=False, default=False,
description="Include debugging code in OpenMP runtime libraries", description="Include debugging code in OpenMP runtime libraries",
) )
variant("lldb", default=True, when='+clang', description="Build the LLVM debugger") variant("lldb", default=True, when="+clang", description="Build the LLVM debugger")
variant("lld", default=True, description="Build the LLVM linker") variant("lld", default=True, description="Build the LLVM linker")
variant("mlir", default=False, when='@10:', description="Build with MLIR support") variant("mlir", default=False, when="@10:", description="Build with MLIR support")
variant( variant(
"internal_unwind", "internal_unwind", default=True, when="+clang", description="Build the libcxxabi libunwind"
default=True, when='+clang',
description="Build the libcxxabi libunwind",
) )
variant( variant(
"polly", "polly",
default=True, default=True,
description="Build the LLVM polyhedral optimization plugin, " description="Build the LLVM polyhedral optimization plugin, " "only builds for 3.7.0+",
"only builds for 3.7.0+",
) )
variant( variant(
"libcxx", "libcxx", default=True, when="+clang", description="Build the LLVM C++ standard library"
default=True, when='+clang',
description="Build the LLVM C++ standard library",
) )
variant( variant(
"compiler-rt", when='+clang', "compiler-rt",
when="+clang",
default=True, default=True,
description="Build LLVM compiler runtime, including sanitizers", description="Build LLVM compiler runtime, including sanitizers",
) )
@ -124,11 +119,7 @@ class Llvm(CMakePackage, CudaPackage):
default=(sys.platform != "darwin"), default=(sys.platform != "darwin"),
description="Add support for LTO with the gold linker plugin", description="Add support for LTO with the gold linker plugin",
) )
variant( variant("split_dwarf", default=False, description="Build with split dwarf information")
"split_dwarf",
default=False,
description="Build with split dwarf information",
)
variant( variant(
"llvm_dylib", "llvm_dylib",
default=True, default=True,
@ -136,18 +127,40 @@ class Llvm(CMakePackage, CudaPackage):
) )
variant( variant(
"link_llvm_dylib", "link_llvm_dylib",
default=False, when='+llvm_dylib', default=False,
when="+llvm_dylib",
description="Link LLVM tools against the LLVM shared library", description="Link LLVM tools against the LLVM shared library",
) )
variant( variant(
"targets", "targets",
default="none", default="none",
description=("What targets to build. Spack's target family is always added " description=(
"(e.g. X86 is automatically enabled when targeting znver2)."), "What targets to build. Spack's target family is always added "
values=("all", "none", "aarch64", "amdgpu", "arm", "avr", "bpf", "cppbackend", "(e.g. X86 is automatically enabled when targeting znver2)."
"hexagon", "lanai", "mips", "msp430", "nvptx", "powerpc", "riscv", ),
"sparc", "systemz", "webassembly", "x86", "xcore"), values=(
multi=True "all",
"none",
"aarch64",
"amdgpu",
"arm",
"avr",
"bpf",
"cppbackend",
"hexagon",
"lanai",
"mips",
"msp430",
"nvptx",
"powerpc",
"riscv",
"sparc",
"systemz",
"webassembly",
"x86",
"xcore",
),
multi=True,
) )
variant( variant(
"build_type", "build_type",
@ -157,51 +170,52 @@ class Llvm(CMakePackage, CudaPackage):
) )
variant( variant(
"omp_tsan", "omp_tsan",
default=False, when='@6:', default=False,
when="@6:",
description="Build with OpenMP capable thread sanitizer", description="Build with OpenMP capable thread sanitizer",
) )
variant( variant(
"omp_as_runtime", "omp_as_runtime",
default=True, default=True,
when='+clang @12:', when="+clang @12:",
description="Build OpenMP runtime via ENABLE_RUNTIME by just-built Clang", description="Build OpenMP runtime via ENABLE_RUNTIME by just-built Clang",
) )
variant('code_signing', default=False,
when='+lldb platform=darwin',
description="Enable code-signing on macOS")
variant("python", default=False, description="Install python bindings")
variant('version_suffix', default='none', description="Add a symbol suffix")
variant( variant(
'shlib_symbol_version', "code_signing",
default='none', default=False,
when="+lldb platform=darwin",
description="Enable code-signing on macOS",
)
variant("python", default=False, description="Install python bindings")
variant("version_suffix", default="none", description="Add a symbol suffix")
variant(
"shlib_symbol_version",
default="none",
description="Add shared library symbol version", description="Add shared library symbol version",
when='@13:' when="@13:",
) )
variant( variant(
'z3', "z3", default=False, when="+clang @8:", description="Use Z3 for the clang static analyzer"
default=False,
when='+clang @8:',
description='Use Z3 for the clang static analyzer'
) )
provides('libllvm@14', when='@14.0.0:14') provides("libllvm@14", when="@14.0.0:14")
provides('libllvm@13', when='@13.0.0:13') provides("libllvm@13", when="@13.0.0:13")
provides('libllvm@12', when='@12.0.0:12') provides("libllvm@12", when="@12.0.0:12")
provides('libllvm@11', when='@11.0.0:11') provides("libllvm@11", when="@11.0.0:11")
provides('libllvm@10', when='@10.0.0:10') provides("libllvm@10", when="@10.0.0:10")
provides('libllvm@9', when='@9.0.0:9') provides("libllvm@9", when="@9.0.0:9")
provides('libllvm@8', when='@8.0.0:8') provides("libllvm@8", when="@8.0.0:8")
provides('libllvm@7', when='@7.0.0:7') provides("libllvm@7", when="@7.0.0:7")
provides('libllvm@6', when='@6.0.0:6') provides("libllvm@6", when="@6.0.0:6")
provides('libllvm@5', when='@5.0.0:5') provides("libllvm@5", when="@5.0.0:5")
provides('libllvm@4', when='@4.0.0:4') provides("libllvm@4", when="@4.0.0:4")
provides('libllvm@3', when='@3.0.0:3') provides("libllvm@3", when="@3.0.0:3")
extends("python", when="+python") extends("python", when="+python")
# Build dependency # Build dependency
depends_on("cmake@3.4.3:", type="build") depends_on("cmake@3.4.3:", type="build")
depends_on('cmake@3.13.4:', type='build', when='@12:') depends_on("cmake@3.13.4:", type="build", when="@12:")
depends_on("ninja", type="build") depends_on("ninja", type="build")
depends_on("python@2.7:2.8", when="@:4 ~python", type="build") depends_on("python@2.7:2.8", when="@:4 ~python", type="build")
depends_on("python", when="@5: ~python", type="build") depends_on("python", when="@5: ~python", type="build")
@ -242,7 +256,7 @@ class Llvm(CMakePackage, CudaPackage):
# clang/lib: a lambda parameter cannot shadow an explicitly captured entity # clang/lib: a lambda parameter cannot shadow an explicitly captured entity
conflicts("%clang@8:", when="@:4") conflicts("%clang@8:", when="@:4")
# Internal compiler error on gcc 8.4 on aarch64 https://bugzilla.redhat.com/show_bug.cgi?id=1958295 # Internal compiler error on gcc 8.4 on aarch64 https://bugzilla.redhat.com/show_bug.cgi?id=1958295
conflicts('%gcc@8.4:8.4.9', when='@12: target=aarch64:') conflicts("%gcc@8.4:8.4.9", when="@12: target=aarch64:")
# When these versions are concretized, but not explicitly with +libcxx, these # When these versions are concretized, but not explicitly with +libcxx, these
# conflicts will enable clingo to set ~libcxx, making the build successful: # conflicts will enable clingo to set ~libcxx, making the build successful:
@ -257,12 +271,12 @@ class Llvm(CMakePackage, CudaPackage):
conflicts("%apple-clang@:11", when="@13:+libcxx") conflicts("%apple-clang@:11", when="@13:+libcxx")
# libcxx-4 and compiler-rt-4 fail to build with "newer" clang and gcc versions: # libcxx-4 and compiler-rt-4 fail to build with "newer" clang and gcc versions:
conflicts('%gcc@7:', when='@:4+libcxx') conflicts("%gcc@7:", when="@:4+libcxx")
conflicts('%clang@6:', when='@:4+libcxx') conflicts("%clang@6:", when="@:4+libcxx")
conflicts('%apple-clang@6:', when='@:4+libcxx') conflicts("%apple-clang@6:", when="@:4+libcxx")
conflicts('%gcc@7:', when='@:4+compiler-rt') conflicts("%gcc@7:", when="@:4+compiler-rt")
conflicts('%clang@6:', when='@:4+compiler-rt') conflicts("%clang@6:", when="@:4+compiler-rt")
conflicts('%apple-clang@6:', when='@:4+compiler-rt') conflicts("%apple-clang@6:", when="@:4+compiler-rt")
# cuda_arch value must be specified # cuda_arch value must be specified
conflicts("cuda_arch=none", when="+cuda", msg="A value for cuda_arch must be specified.") conflicts("cuda_arch=none", when="+cuda", msg="A value for cuda_arch must be specified.")
@ -270,27 +284,27 @@ class Llvm(CMakePackage, CudaPackage):
# LLVM bug https://bugs.llvm.org/show_bug.cgi?id=48234 # LLVM bug https://bugs.llvm.org/show_bug.cgi?id=48234
# CMake bug: https://gitlab.kitware.com/cmake/cmake/-/issues/21469 # CMake bug: https://gitlab.kitware.com/cmake/cmake/-/issues/21469
# Fixed in upstream versions of both # Fixed in upstream versions of both
conflicts('^cmake@3.19.0', when='@6:11.0.0') conflicts("^cmake@3.19.0", when="@6:11.0.0")
# Github issue #4986 # Github issue #4986
patch("llvm_gcc7.patch", when="@4.0.0:4.0.1+lldb %gcc@7.0:") patch("llvm_gcc7.patch", when="@4.0.0:4.0.1+lldb %gcc@7.0:")
# sys/ustat.h has been removed in favour of statfs from glibc-2.28. Use fixed sizes: # sys/ustat.h has been removed in favour of statfs from glibc-2.28. Use fixed sizes:
patch('llvm5-sanitizer-ustat.patch', when="@4:6.0.0+compiler-rt") patch("llvm5-sanitizer-ustat.patch", when="@4:6.0.0+compiler-rt")
# Fix lld templates: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=230463 # Fix lld templates: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=230463
patch('llvm4-lld-ELF-Symbols.patch', when="@4+lld%clang@6:") patch("llvm4-lld-ELF-Symbols.patch", when="@4+lld%clang@6:")
patch('llvm5-lld-ELF-Symbols.patch', when="@5+lld%clang@7:") patch("llvm5-lld-ELF-Symbols.patch", when="@5+lld%clang@7:")
# Fix missing std:size_t in 'llvm@4:5' when built with '%clang@7:' # Fix missing std:size_t in 'llvm@4:5' when built with '%clang@7:'
patch('xray_buffer_queue-cstddef.patch', when="@4:5+compiler-rt%clang@7:") patch("xray_buffer_queue-cstddef.patch", when="@4:5+compiler-rt%clang@7:")
# https://github.com/llvm/llvm-project/commit/947f9692440836dcb8d88b74b69dd379d85974ce # https://github.com/llvm/llvm-project/commit/947f9692440836dcb8d88b74b69dd379d85974ce
patch('sanitizer-ipc_perm_mode.patch', when="@5:7+compiler-rt%clang@11:") patch("sanitizer-ipc_perm_mode.patch", when="@5:7+compiler-rt%clang@11:")
patch('sanitizer-ipc_perm_mode.patch', when="@5:9+compiler-rt%gcc@9:") patch("sanitizer-ipc_perm_mode.patch", when="@5:9+compiler-rt%gcc@9:")
# github.com/spack/spack/issues/24270: MicrosoftDemangle for %gcc@10: and %clang@13: # github.com/spack/spack/issues/24270: MicrosoftDemangle for %gcc@10: and %clang@13:
patch('missing-includes.patch', when='@8') patch("missing-includes.patch", when="@8")
# Backport from llvm master + additional fix # Backport from llvm master + additional fix
# see https://bugs.llvm.org/show_bug.cgi?id=39696 # see https://bugs.llvm.org/show_bug.cgi?id=39696
@ -315,33 +329,33 @@ class Llvm(CMakePackage, CudaPackage):
patch("llvm_python_path.patch", when="@:11") patch("llvm_python_path.patch", when="@:11")
# Workaround for issue https://github.com/spack/spack/issues/18197 # Workaround for issue https://github.com/spack/spack/issues/18197
patch('llvm7_intel.patch', when='@7 %intel@18.0.2,19.0.0:19.1.99') patch("llvm7_intel.patch", when="@7 %intel@18.0.2,19.0.0:19.1.99")
# Remove cyclades support to build against newer kernel headers # Remove cyclades support to build against newer kernel headers
# https://reviews.llvm.org/D102059 # https://reviews.llvm.org/D102059
patch('no_cyclades.patch', when='@10:12.0.0') patch("no_cyclades.patch", when="@10:12.0.0")
patch('no_cyclades9.patch', when='@6:9') patch("no_cyclades9.patch", when="@6:9")
patch('llvm-gcc11.patch', when='@9:11%gcc@11:') patch("llvm-gcc11.patch", when="@9:11%gcc@11:")
# add -lpthread to build OpenMP libraries with Fujitsu compiler # add -lpthread to build OpenMP libraries with Fujitsu compiler
patch('llvm12-thread.patch', when='@12 %fj') patch("llvm12-thread.patch", when="@12 %fj")
patch('llvm13-thread.patch', when='@13 %fj') patch("llvm13-thread.patch", when="@13 %fj")
# avoid build failed with Fujitsu compiler # avoid build failed with Fujitsu compiler
patch('llvm13-fujitsu.patch', when='@13 %fj') patch("llvm13-fujitsu.patch", when="@13 %fj")
# patch for missing hwloc.h include for libompd # patch for missing hwloc.h include for libompd
patch('llvm14-hwloc-ompd.patch', when='@14') patch("llvm14-hwloc-ompd.patch", when="@14")
# make libflags a list in openmp subproject when ~omp_as_runtime # make libflags a list in openmp subproject when ~omp_as_runtime
patch('libomp-libflags-as-list.patch', when='@3.7:') patch("libomp-libflags-as-list.patch", when="@3.7:")
# The functions and attributes below implement external package # The functions and attributes below implement external package
# detection for LLVM. See: # detection for LLVM. See:
# #
# https://spack.readthedocs.io/en/latest/packaging_guide.html#making-a-package-discoverable-with-spack-external-find # https://spack.readthedocs.io/en/latest/packaging_guide.html#making-a-package-discoverable-with-spack-external-find
executables = ['clang', 'flang', 'ld.lld', 'lldb'] executables = ["clang", "flang", "ld.lld", "lldb"]
@classmethod @classmethod
def filter_detected_exes(cls, prefix, exes_in_prefix): def filter_detected_exes(cls, prefix, exes_in_prefix):
@ -351,7 +365,7 @@ class Llvm(CMakePackage, CudaPackage):
# on some port and would hang Spack during detection. # on some port and would hang Spack during detection.
# clang-cl and clang-cpp are dev tools that we don't # clang-cl and clang-cpp are dev tools that we don't
# need to test # need to test
if any(x in exe for x in ('vscode', 'cpp', '-cl', '-gpu')): if any(x in exe for x in ("vscode", "cpp", "-cl", "-gpu")):
continue continue
result.append(exe) result.append(exe)
return result return result
@ -360,20 +374,20 @@ class Llvm(CMakePackage, CudaPackage):
def determine_version(cls, exe): def determine_version(cls, exe):
version_regex = re.compile( version_regex = re.compile(
# Normal clang compiler versions are left as-is # Normal clang compiler versions are left as-is
r'clang version ([^ )\n]+)-svn[~.\w\d-]*|' r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
# Don't include hyphenated patch numbers in the version # Don't include hyphenated patch numbers in the version
# (see https://github.com/spack/spack/pull/14365 for details) # (see https://github.com/spack/spack/pull/14365 for details)
r'clang version ([^ )\n]+?)-[~.\w\d-]*|' r"clang version ([^ )\n]+?)-[~.\w\d-]*|"
r'clang version ([^ )\n]+)|' r"clang version ([^ )\n]+)|"
# LLDB # LLDB
r'lldb version ([^ )\n]+)|' r"lldb version ([^ )\n]+)|"
# LLD # LLD
r'LLD ([^ )\n]+) \(compatible with GNU linkers\)' r"LLD ([^ )\n]+) \(compatible with GNU linkers\)"
) )
try: try:
compiler = Executable(exe) compiler = Executable(exe)
output = compiler('--version', output=str, error=str) output = compiler("--version", output=str, error=str)
if 'Apple' in output: if "Apple" in output:
return None return None
match = version_regex.search(output) match = version_regex.search(output)
if match: if match:
@ -387,38 +401,39 @@ class Llvm(CMakePackage, CudaPackage):
@classmethod @classmethod
def determine_variants(cls, exes, version_str): def determine_variants(cls, exes, version_str):
variants, compilers = ['+clang'], {} variants, compilers = ["+clang"], {}
lld_found, lldb_found = False, False lld_found, lldb_found = False, False
for exe in exes: for exe in exes:
if 'clang++' in exe: if "clang++" in exe:
compilers['cxx'] = exe compilers["cxx"] = exe
elif 'clang' in exe: elif "clang" in exe:
compilers['c'] = exe compilers["c"] = exe
elif 'flang' in exe: elif "flang" in exe:
variants.append('+flang') variants.append("+flang")
compilers['fc'] = exe compilers["fc"] = exe
compilers['f77'] = exe compilers["f77"] = exe
elif 'ld.lld' in exe: elif "ld.lld" in exe:
lld_found = True lld_found = True
compilers['ld'] = exe compilers["ld"] = exe
elif 'lldb' in exe: elif "lldb" in exe:
lldb_found = True lldb_found = True
compilers['lldb'] = exe compilers["lldb"] = exe
variants.append('+lld' if lld_found else '~lld') variants.append("+lld" if lld_found else "~lld")
variants.append('+lldb' if lldb_found else '~lldb') variants.append("+lldb" if lldb_found else "~lldb")
return ''.join(variants), {'compilers': compilers} return "".join(variants), {"compilers": compilers}
@classmethod @classmethod
def validate_detected_spec(cls, spec, extra_attributes): def validate_detected_spec(cls, spec, extra_attributes):
# For LLVM 'compilers' is a mandatory attribute # For LLVM 'compilers' is a mandatory attribute
msg = ('the extra attribute "compilers" must be set for ' msg = 'the extra attribute "compilers" must be set for ' 'the detected spec "{0}"'.format(
'the detected spec "{0}"'.format(spec)) spec
assert 'compilers' in extra_attributes, msg )
compilers = extra_attributes['compilers'] assert "compilers" in extra_attributes, msg
for key in ('c', 'cxx'): compilers = extra_attributes["compilers"]
msg = '{0} compiler not found for {1}' for key in ("c", "cxx"):
msg = "{0} compiler not found for {1}"
assert key in compilers, msg.format(key, spec) assert key in compilers, msg.format(key, spec)
@property @property
@ -426,10 +441,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve C compiler [spec is not concrete]" msg = "cannot retrieve C compiler [spec is not concrete]"
assert self.spec.concrete, msg assert self.spec.concrete, msg
if self.spec.external: if self.spec.external:
return self.spec.extra_attributes['compilers'].get('c', None) return self.spec.extra_attributes["compilers"].get("c", None)
result = None result = None
if '+clang' in self.spec: if "+clang" in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang') result = os.path.join(self.spec.prefix.bin, "clang")
return result return result
@property @property
@ -437,10 +452,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve C++ compiler [spec is not concrete]" msg = "cannot retrieve C++ compiler [spec is not concrete]"
assert self.spec.concrete, msg assert self.spec.concrete, msg
if self.spec.external: if self.spec.external:
return self.spec.extra_attributes['compilers'].get('cxx', None) return self.spec.extra_attributes["compilers"].get("cxx", None)
result = None result = None
if '+clang' in self.spec: if "+clang" in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang++') result = os.path.join(self.spec.prefix.bin, "clang++")
return result return result
@property @property
@ -448,10 +463,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve Fortran compiler [spec is not concrete]" msg = "cannot retrieve Fortran compiler [spec is not concrete]"
assert self.spec.concrete, msg assert self.spec.concrete, msg
if self.spec.external: if self.spec.external:
return self.spec.extra_attributes['compilers'].get('fc', None) return self.spec.extra_attributes["compilers"].get("fc", None)
result = None result = None
if '+flang' in self.spec: if "+flang" in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang') result = os.path.join(self.spec.prefix.bin, "flang")
return result return result
@property @property
@ -459,27 +474,25 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve Fortran 77 compiler [spec is not concrete]" msg = "cannot retrieve Fortran 77 compiler [spec is not concrete]"
assert self.spec.concrete, msg assert self.spec.concrete, msg
if self.spec.external: if self.spec.external:
return self.spec.extra_attributes['compilers'].get('f77', None) return self.spec.extra_attributes["compilers"].get("f77", None)
result = None result = None
if '+flang' in self.spec: if "+flang" in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang') result = os.path.join(self.spec.prefix.bin, "flang")
return result return result
@property @property
def libs(self): def libs(self):
return LibraryList(self.llvm_config("--libfiles", "all", return LibraryList(self.llvm_config("--libfiles", "all", result="list"))
result="list"))
@run_before('cmake') @run_before("cmake")
def codesign_check(self): def codesign_check(self):
if self.spec.satisfies("+code_signing"): if self.spec.satisfies("+code_signing"):
codesign = which('codesign') codesign = which("codesign")
mkdir('tmp') mkdir("tmp")
llvm_check_file = join_path('tmp', 'llvm_check') llvm_check_file = join_path("tmp", "llvm_check")
copy('/usr/bin/false', llvm_check_file) copy("/usr/bin/false", llvm_check_file)
try: try:
codesign('-f', '-s', 'lldb_codesign', '--dryrun', codesign("-f", "-s", "lldb_codesign", "--dryrun", llvm_check_file)
llvm_check_file)
except ProcessError: except ProcessError:
# Newer LLVM versions have a simple script that sets up # Newer LLVM versions have a simple script that sets up
@ -489,32 +502,32 @@ class Llvm(CMakePackage, CudaPackage):
setup() setup()
except Exception: except Exception:
raise RuntimeError( raise RuntimeError(
'spack was unable to either find or set up' "spack was unable to either find or set up"
'code-signing on your system. Please refer to' "code-signing on your system. Please refer to"
'https://lldb.llvm.org/resources/build.html#' "https://lldb.llvm.org/resources/build.html#"
'code-signing-on-macos for details on how to' "code-signing-on-macos for details on how to"
'create this identity.' "create this identity."
) )
def flag_handler(self, name, flags): def flag_handler(self, name, flags):
if name == 'cxxflags': if name == "cxxflags":
flags.append(self.compiler.cxx11_flag) flags.append(self.compiler.cxx11_flag)
return (None, flags, None) return (None, flags, None)
elif name == 'ldflags' and self.spec.satisfies('%intel'): elif name == "ldflags" and self.spec.satisfies("%intel"):
flags.append('-shared-intel') flags.append("-shared-intel")
return (None, flags, None) return (None, flags, None)
return (flags, None, None) return (flags, None, None)
def setup_build_environment(self, env): def setup_build_environment(self, env):
"""When using %clang, add only its ld.lld-$ver and/or ld.lld to our PATH""" """When using %clang, add only its ld.lld-$ver and/or ld.lld to our PATH"""
if self.compiler.name in ['clang', 'apple-clang']: if self.compiler.name in ["clang", "apple-clang"]:
for lld in 'ld.lld-{0}'.format(self.compiler.version.version[0]), 'ld.lld': for lld in "ld.lld-{0}".format(self.compiler.version.version[0]), "ld.lld":
bin = os.path.join(os.path.dirname(self.compiler.cc), lld) bin = os.path.join(os.path.dirname(self.compiler.cc), lld)
sym = os.path.join(self.stage.path, 'ld.lld') sym = os.path.join(self.stage.path, "ld.lld")
if os.path.exists(bin) and not os.path.exists(sym): if os.path.exists(bin) and not os.path.exists(sym):
mkdirp(self.stage.path) mkdirp(self.stage.path)
os.symlink(bin, sym) os.symlink(bin, sym)
env.prepend_path('PATH', self.stage.path) env.prepend_path("PATH", self.stage.path)
def setup_run_environment(self, env): def setup_run_environment(self, env):
if "+clang" in self.spec: if "+clang" in self.spec:
@ -531,7 +544,7 @@ class Llvm(CMakePackage, CudaPackage):
define = CMakePackage.define define = CMakePackage.define
from_variant = self.define_from_variant from_variant = self.define_from_variant
python = spec['python'] python = spec["python"]
cmake_args = [ cmake_args = [
define("LLVM_REQUIRES_RTTI", True), define("LLVM_REQUIRES_RTTI", True),
define("LLVM_ENABLE_RTTI", True), define("LLVM_ENABLE_RTTI", True),
@ -544,14 +557,13 @@ class Llvm(CMakePackage, CudaPackage):
define("LIBOMP_HWLOC_INSTALL_DIR", spec["hwloc"].prefix), define("LIBOMP_HWLOC_INSTALL_DIR", spec["hwloc"].prefix),
] ]
version_suffix = spec.variants['version_suffix'].value version_suffix = spec.variants["version_suffix"].value
if version_suffix != 'none': if version_suffix != "none":
cmake_args.append(define('LLVM_VERSION_SUFFIX', version_suffix)) cmake_args.append(define("LLVM_VERSION_SUFFIX", version_suffix))
shlib_symbol_version = spec.variants.get('shlib_symbol_version', None) shlib_symbol_version = spec.variants.get("shlib_symbol_version", None)
if shlib_symbol_version is not None and shlib_symbol_version.value != 'none': if shlib_symbol_version is not None and shlib_symbol_version.value != "none":
cmake_args.append(define('LLVM_SHLIB_SYMBOL_VERSION', cmake_args.append(define("LLVM_SHLIB_SYMBOL_VERSION", shlib_symbol_version.value))
shlib_symbol_version.value))
if python.version >= Version("3"): if python.version >= Version("3"):
cmake_args.append(define("Python3_EXECUTABLE", python.command.path)) cmake_args.append(define("Python3_EXECUTABLE", python.command.path))
@ -562,47 +574,56 @@ class Llvm(CMakePackage, CudaPackage):
runtimes = [] runtimes = []
if "+cuda" in spec: if "+cuda" in spec:
cmake_args.extend([ cmake_args.extend(
[
define("CUDA_TOOLKIT_ROOT_DIR", spec["cuda"].prefix), define("CUDA_TOOLKIT_ROOT_DIR", spec["cuda"].prefix),
define("LIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES", define(
",".join(spec.variants["cuda_arch"].value)), "LIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES",
define("CLANG_OPENMP_NVPTX_DEFAULT_ARCH", ",".join(spec.variants["cuda_arch"].value),
"sm_{0}".format(spec.variants["cuda_arch"].value[-1])), ),
]) define(
"CLANG_OPENMP_NVPTX_DEFAULT_ARCH",
"sm_{0}".format(spec.variants["cuda_arch"].value[-1]),
),
]
)
if "+omp_as_runtime" in spec: if "+omp_as_runtime" in spec:
cmake_args.extend([ cmake_args.extend(
[
define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True), define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
# work around bad libelf detection in libomptarget # work around bad libelf detection in libomptarget
define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", define(
spec["libelf"].prefix.include), "LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", spec["libelf"].prefix.include
]) ),
]
)
else: else:
# still build libomptarget but disable cuda # still build libomptarget but disable cuda
cmake_args.extend([ cmake_args.extend(
[
define("CUDA_TOOLKIT_ROOT_DIR", "IGNORE"), define("CUDA_TOOLKIT_ROOT_DIR", "IGNORE"),
define("CUDA_SDK_ROOT_DIR", "IGNORE"), define("CUDA_SDK_ROOT_DIR", "IGNORE"),
define("CUDA_NVCC_EXECUTABLE", "IGNORE"), define("CUDA_NVCC_EXECUTABLE", "IGNORE"),
define("LIBOMPTARGET_DEP_CUDA_DRIVER_LIBRARIES", "IGNORE"), define("LIBOMPTARGET_DEP_CUDA_DRIVER_LIBRARIES", "IGNORE"),
]) ]
)
cmake_args.append(from_variant("LIBOMPTARGET_ENABLE_DEBUG", "omp_debug")) cmake_args.append(from_variant("LIBOMPTARGET_ENABLE_DEBUG", "omp_debug"))
if "+lldb" in spec: if "+lldb" in spec:
projects.append("lldb") projects.append("lldb")
cmake_args.append(define('LLDB_ENABLE_LIBEDIT', True)) cmake_args.append(define("LLDB_ENABLE_LIBEDIT", True))
cmake_args.append(define('LLDB_ENABLE_NCURSES', True)) cmake_args.append(define("LLDB_ENABLE_NCURSES", True))
cmake_args.append(define('LLDB_ENABLE_LIBXML2', False)) cmake_args.append(define("LLDB_ENABLE_LIBXML2", False))
if spec.version >= Version('10'): if spec.version >= Version("10"):
cmake_args.append(from_variant("LLDB_ENABLE_PYTHON", 'python')) cmake_args.append(from_variant("LLDB_ENABLE_PYTHON", "python"))
else: else:
cmake_args.append(define("LLDB_DISABLE_PYTHON", '~python' in spec)) cmake_args.append(define("LLDB_DISABLE_PYTHON", "~python" in spec))
if spec.satisfies("@5.0.0: +python"): if spec.satisfies("@5.0.0: +python"):
cmake_args.append(define("LLDB_USE_SYSTEM_SIX", True)) cmake_args.append(define("LLDB_USE_SYSTEM_SIX", True))
if "+gold" in spec: if "+gold" in spec:
cmake_args.append( cmake_args.append(define("LLVM_BINUTILS_INCDIR", spec["binutils"].prefix.include))
define("LLVM_BINUTILS_INCDIR", spec["binutils"].prefix.include)
)
if "+clang" in spec: if "+clang" in spec:
projects.append("clang") projects.append("clang")
@ -612,10 +633,10 @@ class Llvm(CMakePackage, CudaPackage):
else: else:
projects.append("openmp") projects.append("openmp")
if '@8' in spec: if "@8" in spec:
cmake_args.append(from_variant('CLANG_ANALYZER_ENABLE_Z3_SOLVER', 'z3')) cmake_args.append(from_variant("CLANG_ANALYZER_ENABLE_Z3_SOLVER", "z3"))
elif '@9:' in spec: elif "@9:" in spec:
cmake_args.append(from_variant('LLVM_ENABLE_Z3_SOLVER', 'z3')) cmake_args.append(from_variant("LLVM_ENABLE_Z3_SOLVER", "z3"))
if "+flang" in spec: if "+flang" in spec:
projects.append("flang") projects.append("flang")
@ -634,7 +655,8 @@ class Llvm(CMakePackage, CudaPackage):
projects.append("polly") projects.append("polly")
cmake_args.append(define("LINK_POLLY_INTO_TOOLS", True)) cmake_args.append(define("LINK_POLLY_INTO_TOOLS", True))
cmake_args.extend([ cmake_args.extend(
[
define("BUILD_SHARED_LIBS", False), define("BUILD_SHARED_LIBS", False),
from_variant("LLVM_BUILD_LLVM_DYLIB", "llvm_dylib"), from_variant("LLVM_BUILD_LLVM_DYLIB", "llvm_dylib"),
from_variant("LLVM_LINK_LLVM_DYLIB", "link_llvm_dylib"), from_variant("LLVM_LINK_LLVM_DYLIB", "link_llvm_dylib"),
@ -642,18 +664,17 @@ class Llvm(CMakePackage, CudaPackage):
# By default on Linux, libc++.so is a ldscript. CMake fails to add # By default on Linux, libc++.so is a ldscript. CMake fails to add
# CMAKE_INSTALL_RPATH to it, which fails. Statically link libc++abi.a # CMAKE_INSTALL_RPATH to it, which fails. Statically link libc++abi.a
# into libc++.so, linking with -lc++ or -stdlib=libc++ is enough. # into libc++.so, linking with -lc++ or -stdlib=libc++ is enough.
define('LIBCXX_ENABLE_STATIC_ABI_LIBRARY', True) define("LIBCXX_ENABLE_STATIC_ABI_LIBRARY", True),
]) ]
)
cmake_args.append(define( cmake_args.append(define("LLVM_TARGETS_TO_BUILD", get_llvm_targets_to_build(spec)))
"LLVM_TARGETS_TO_BUILD",
get_llvm_targets_to_build(spec)))
cmake_args.append(from_variant("LIBOMP_TSAN_SUPPORT", "omp_tsan")) cmake_args.append(from_variant("LIBOMP_TSAN_SUPPORT", "omp_tsan"))
if self.compiler.name == "gcc": if self.compiler.name == "gcc":
compiler = Executable(self.compiler.cc) compiler = Executable(self.compiler.cc)
gcc_output = compiler('-print-search-dirs', output=str, error=str) gcc_output = compiler("-print-search-dirs", output=str, error=str)
for line in gcc_output.splitlines(): for line in gcc_output.splitlines():
if line.startswith("install:"): if line.startswith("install:"):
@ -665,7 +686,7 @@ class Llvm(CMakePackage, CudaPackage):
cmake_args.append(define("GCC_INSTALL_PREFIX", gcc_prefix)) cmake_args.append(define("GCC_INSTALL_PREFIX", gcc_prefix))
if self.spec.satisfies("~code_signing platform=darwin"): if self.spec.satisfies("~code_signing platform=darwin"):
cmake_args.append(define('LLDB_USE_SYSTEM_DEBUGSERVER', True)) cmake_args.append(define("LLDB_USE_SYSTEM_DEBUGSERVER", True))
# Semicolon seperated list of projects to enable # Semicolon seperated list of projects to enable
cmake_args.append(define("LLVM_ENABLE_PROJECTS", projects)) cmake_args.append(define("LLVM_ENABLE_PROJECTS", projects))
@ -689,20 +710,24 @@ class Llvm(CMakePackage, CudaPackage):
# rebuild libomptarget to get bytecode runtime library files # rebuild libomptarget to get bytecode runtime library files
with working_dir(ompdir, create=True): with working_dir(ompdir, create=True):
cmake_args = [ cmake_args = [
'-G', 'Ninja', "-G",
define('CMAKE_BUILD_TYPE', spec.variants['build_type'].value), "Ninja",
define("CMAKE_BUILD_TYPE", spec.variants["build_type"].value),
define("CMAKE_C_COMPILER", spec.prefix.bin + "/clang"), define("CMAKE_C_COMPILER", spec.prefix.bin + "/clang"),
define("CMAKE_CXX_COMPILER", spec.prefix.bin + "/clang++"), define("CMAKE_CXX_COMPILER", spec.prefix.bin + "/clang++"),
define("CMAKE_INSTALL_PREFIX", spec.prefix), define("CMAKE_INSTALL_PREFIX", spec.prefix),
define('CMAKE_PREFIX_PATH', prefix_paths) define("CMAKE_PREFIX_PATH", prefix_paths),
] ]
cmake_args.extend(self.cmake_args()) cmake_args.extend(self.cmake_args())
cmake_args.extend([ cmake_args.extend(
[
define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True), define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", define(
spec["libelf"].prefix.include), "LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", spec["libelf"].prefix.include
),
self.stage.source_path + "/openmp", self.stage.source_path + "/openmp",
]) ]
)
cmake(*cmake_args) cmake(*cmake_args)
ninja() ninja()
@ -717,22 +742,22 @@ class Llvm(CMakePackage, CudaPackage):
install_tree("bin", join_path(self.prefix, "libexec", "llvm")) install_tree("bin", join_path(self.prefix, "libexec", "llvm"))
def llvm_config(self, *args, **kwargs): def llvm_config(self, *args, **kwargs):
lc = Executable(self.prefix.bin.join('llvm-config')) lc = Executable(self.prefix.bin.join("llvm-config"))
if not kwargs.get('output'): if not kwargs.get("output"):
kwargs['output'] = str kwargs["output"] = str
ret = lc(*args, **kwargs) ret = lc(*args, **kwargs)
if kwargs.get('result') == "list": if kwargs.get("result") == "list":
return ret.split() return ret.split()
else: else:
return ret return ret
def get_llvm_targets_to_build(spec): def get_llvm_targets_to_build(spec):
targets = spec.variants['targets'].value targets = spec.variants["targets"].value
# Build everything? # Build everything?
if 'all' in targets: if "all" in targets:
return 'all' return "all"
# Convert targets variant values to CMake LLVM_TARGETS_TO_BUILD array. # Convert targets variant values to CMake LLVM_TARGETS_TO_BUILD array.
spack_to_cmake = { spack_to_cmake = {
@ -753,10 +778,10 @@ def get_llvm_targets_to_build(spec):
"systemz": "SystemZ", "systemz": "SystemZ",
"webassembly": "WebAssembly", "webassembly": "WebAssembly",
"x86": "X86", "x86": "X86",
"xcore": "XCore" "xcore": "XCore",
} }
if 'none' in targets: if "none" in targets:
llvm_targets = set() llvm_targets = set()
else: else:
llvm_targets = set(spack_to_cmake[target] for target in targets) llvm_targets = set(spack_to_cmake[target] for target in targets)

View file

@ -24,125 +24,138 @@ class PyTorch(PythonPackage, CudaPackage):
homepage = "https://pytorch.org/" homepage = "https://pytorch.org/"
git = "https://github.com/pytorch/pytorch.git" git = "https://github.com/pytorch/pytorch.git"
maintainers = ['adamjstewart'] maintainers("adamjstewart")
# Exact set of modules is version- and variant-specific, just attempt to import the # Exact set of modules is version- and variant-specific, just attempt to import the
# core libraries to ensure that the package was successfully installed. # core libraries to ensure that the package was successfully installed.
import_modules = ['torch', 'torch.autograd', 'torch.nn', 'torch.utils'] import_modules = ["torch", "torch.autograd", "torch.nn", "torch.utils"]
version('master', branch='master', submodules=True) version("master", branch="master", submodules=True)
version('1.10.1', tag='v1.10.1', submodules=True) version("1.10.1", tag="v1.10.1", submodules=True)
version('1.10.0', tag='v1.10.0', submodules=True) version("1.10.0", tag="v1.10.0", submodules=True)
version('1.9.1', tag='v1.9.1', submodules=True) version("1.9.1", tag="v1.9.1", submodules=True)
version('1.9.0', tag='v1.9.0', submodules=True) version("1.9.0", tag="v1.9.0", submodules=True)
version('1.8.2', tag='v1.8.2', submodules=True) version("1.8.2", tag="v1.8.2", submodules=True)
version('1.8.1', tag='v1.8.1', submodules=True) version("1.8.1", tag="v1.8.1", submodules=True)
version('1.8.0', tag='v1.8.0', submodules=True) version("1.8.0", tag="v1.8.0", submodules=True)
version('1.7.1', tag='v1.7.1', submodules=True) version("1.7.1", tag="v1.7.1", submodules=True)
version('1.7.0', tag='v1.7.0', submodules=True) version("1.7.0", tag="v1.7.0", submodules=True)
version('1.6.0', tag='v1.6.0', submodules=True) version("1.6.0", tag="v1.6.0", submodules=True)
version('1.5.1', tag='v1.5.1', submodules=True) version("1.5.1", tag="v1.5.1", submodules=True)
version('1.5.0', tag='v1.5.0', submodules=True) version("1.5.0", tag="v1.5.0", submodules=True)
version('1.4.1', tag='v1.4.1', submodules=True) version("1.4.1", tag="v1.4.1", submodules=True)
version('1.4.0', tag='v1.4.0', submodules=True, deprecated=True, version(
submodules_delete=['third_party/fbgemm']) "1.4.0",
version('1.3.1', tag='v1.3.1', submodules=True) tag="v1.4.0",
version('1.3.0', tag='v1.3.0', submodules=True) submodules=True,
version('1.2.0', tag='v1.2.0', submodules=True) deprecated=True,
version('1.1.0', tag='v1.1.0', submodules=True) submodules_delete=["third_party/fbgemm"],
version('1.0.1', tag='v1.0.1', submodules=True) )
version('1.0.0', tag='v1.0.0', submodules=True) version("1.3.1", tag="v1.3.1", submodules=True)
version('0.4.1', tag='v0.4.1', submodules=True, deprecated=True, version("1.3.0", tag="v1.3.0", submodules=True)
submodules_delete=['third_party/nervanagpu']) version("1.2.0", tag="v1.2.0", submodules=True)
version('0.4.0', tag='v0.4.0', submodules=True, deprecated=True) version("1.1.0", tag="v1.1.0", submodules=True)
version('0.3.1', tag='v0.3.1', submodules=True, deprecated=True) version("1.0.1", tag="v1.0.1", submodules=True)
version("1.0.0", tag="v1.0.0", submodules=True)
version(
"0.4.1",
tag="v0.4.1",
submodules=True,
deprecated=True,
submodules_delete=["third_party/nervanagpu"],
)
version("0.4.0", tag="v0.4.0", submodules=True, deprecated=True)
version("0.3.1", tag="v0.3.1", submodules=True, deprecated=True)
is_darwin = sys.platform == 'darwin' is_darwin = sys.platform == "darwin"
# All options are defined in CMakeLists.txt. # All options are defined in CMakeLists.txt.
# Some are listed in setup.py, but not all. # Some are listed in setup.py, but not all.
variant('caffe2', default=True, description='Build Caffe2') variant("caffe2", default=True, description="Build Caffe2")
variant('test', default=False, description='Build C++ test binaries') variant("test", default=False, description="Build C++ test binaries")
variant('cuda', default=not is_darwin, description='Use CUDA') variant("cuda", default=not is_darwin, description="Use CUDA")
variant('rocm', default=False, description='Use ROCm') variant("rocm", default=False, description="Use ROCm")
variant('cudnn', default=not is_darwin, description='Use cuDNN') variant("cudnn", default=not is_darwin, description="Use cuDNN")
variant('fbgemm', default=True, description='Use FBGEMM (quantized 8-bit server operators)') variant("fbgemm", default=True, description="Use FBGEMM (quantized 8-bit server operators)")
variant('kineto', default=True, description='Use Kineto profiling library') variant("kineto", default=True, description="Use Kineto profiling library")
variant('magma', default=not is_darwin, description='Use MAGMA') variant("magma", default=not is_darwin, description="Use MAGMA")
variant('metal', default=is_darwin, description='Use Metal for Caffe2 iOS build') variant("metal", default=is_darwin, description="Use Metal for Caffe2 iOS build")
variant('nccl', default=not is_darwin, description='Use NCCL') variant("nccl", default=not is_darwin, description="Use NCCL")
variant('nnpack', default=True, description='Use NNPACK') variant("nnpack", default=True, description="Use NNPACK")
variant('numa', default=not is_darwin, description='Use NUMA') variant("numa", default=not is_darwin, description="Use NUMA")
variant('numpy', default=True, description='Use NumPy') variant("numpy", default=True, description="Use NumPy")
variant('openmp', default=True, description='Use OpenMP for parallel code') variant("openmp", default=True, description="Use OpenMP for parallel code")
variant('qnnpack', default=True, description='Use QNNPACK (quantized 8-bit operators)') variant("qnnpack", default=True, description="Use QNNPACK (quantized 8-bit operators)")
variant('valgrind', default=not is_darwin, description='Use Valgrind') variant("valgrind", default=not is_darwin, description="Use Valgrind")
variant('xnnpack', default=True, description='Use XNNPACK') variant("xnnpack", default=True, description="Use XNNPACK")
variant('mkldnn', default=True, description='Use MKLDNN') variant("mkldnn", default=True, description="Use MKLDNN")
variant('distributed', default=not is_darwin, description='Use distributed') variant("distributed", default=not is_darwin, description="Use distributed")
variant('mpi', default=not is_darwin, description='Use MPI for Caffe2') variant("mpi", default=not is_darwin, description="Use MPI for Caffe2")
variant('gloo', default=not is_darwin, description='Use Gloo') variant("gloo", default=not is_darwin, description="Use Gloo")
variant('tensorpipe', default=not is_darwin, description='Use TensorPipe') variant("tensorpipe", default=not is_darwin, description="Use TensorPipe")
variant('onnx_ml', default=True, description='Enable traditional ONNX ML API') variant("onnx_ml", default=True, description="Enable traditional ONNX ML API")
variant('breakpad', default=True, description='Enable breakpad crash dump library') variant("breakpad", default=True, description="Enable breakpad crash dump library")
conflicts('+cuda', when='+rocm') conflicts("+cuda", when="+rocm")
conflicts('+cudnn', when='~cuda') conflicts("+cudnn", when="~cuda")
conflicts('+magma', when='~cuda') conflicts("+magma", when="~cuda")
conflicts('+nccl', when='~cuda~rocm') conflicts("+nccl", when="~cuda~rocm")
conflicts('+nccl', when='platform=darwin') conflicts("+nccl", when="platform=darwin")
conflicts('+numa', when='platform=darwin', msg='Only available on Linux') conflicts("+numa", when="platform=darwin", msg="Only available on Linux")
conflicts('+valgrind', when='platform=darwin', msg='Only available on Linux') conflicts("+valgrind", when="platform=darwin", msg="Only available on Linux")
conflicts('+mpi', when='~distributed') conflicts("+mpi", when="~distributed")
conflicts('+gloo', when='~distributed') conflicts("+gloo", when="~distributed")
conflicts('+tensorpipe', when='~distributed') conflicts("+tensorpipe", when="~distributed")
conflicts('+kineto', when='@:1.7') conflicts("+kineto", when="@:1.7")
conflicts('+valgrind', when='@:1.7') conflicts("+valgrind", when="@:1.7")
conflicts('~caffe2', when='@0.4.0:1.6') # no way to disable caffe2? conflicts("~caffe2", when="@0.4.0:1.6") # no way to disable caffe2?
conflicts('+caffe2', when='@:0.3.1') # caffe2 did not yet exist? conflicts("+caffe2", when="@:0.3.1") # caffe2 did not yet exist?
conflicts('+tensorpipe', when='@:1.5') conflicts("+tensorpipe", when="@:1.5")
conflicts('+xnnpack', when='@:1.4') conflicts("+xnnpack", when="@:1.4")
conflicts('~onnx_ml', when='@:1.4') # no way to disable ONNX? conflicts("~onnx_ml", when="@:1.4") # no way to disable ONNX?
conflicts('+rocm', when='@:0.4') conflicts("+rocm", when="@:0.4")
conflicts('+cudnn', when='@:0.4') conflicts("+cudnn", when="@:0.4")
conflicts('+fbgemm', when='@:0.4,1.4.0') conflicts("+fbgemm", when="@:0.4,1.4.0")
conflicts('+qnnpack', when='@:0.4') conflicts("+qnnpack", when="@:0.4")
conflicts('+mkldnn', when='@:0.4') conflicts("+mkldnn", when="@:0.4")
conflicts('+breakpad', when='@:1.9') # Option appeared in 1.10.0 conflicts("+breakpad", when="@:1.9") # Option appeared in 1.10.0
conflicts('+breakpad', when='target=ppc64:', msg='Unsupported') conflicts("+breakpad", when="target=ppc64:", msg="Unsupported")
conflicts('+breakpad', when='target=ppc64le:', msg='Unsupported') conflicts("+breakpad", when="target=ppc64le:", msg="Unsupported")
conflicts('cuda_arch=none', when='+cuda', conflicts(
msg='Must specify CUDA compute capabilities of your GPU, see ' "cuda_arch=none",
'https://developer.nvidia.com/cuda-gpus') when="+cuda",
msg="Must specify CUDA compute capabilities of your GPU, see "
"https://developer.nvidia.com/cuda-gpus",
)
# Required dependencies # Required dependencies
depends_on('cmake@3.5:', type='build') depends_on("cmake@3.5:", type="build")
# Use Ninja generator to speed up build times, automatically used if found # Use Ninja generator to speed up build times, automatically used if found
depends_on('ninja@1.5:', when='@1.1.0:', type='build') depends_on("ninja@1.5:", when="@1.1.0:", type="build")
# See python_min_version in setup.py # See python_min_version in setup.py
depends_on('python@3.6.2:', when='@1.7.1:', type=('build', 'link', 'run')) depends_on("python@3.6.2:", when="@1.7.1:", type=("build", "link", "run"))
depends_on('python@3.6.1:', when='@1.6.0:1.7.0', type=('build', 'link', 'run')) depends_on("python@3.6.1:", when="@1.6.0:1.7.0", type=("build", "link", "run"))
depends_on('python@3.5:', when='@1.5.0:1.5', type=('build', 'link', 'run')) depends_on("python@3.5:", when="@1.5.0:1.5", type=("build", "link", "run"))
depends_on('python@2.7:2.8,3.5:', when='@1.4.0:1.4', type=('build', 'link', 'run')) depends_on("python@2.7:2.8,3.5:", when="@1.4.0:1.4", type=("build", "link", "run"))
depends_on('python@2.7:2.8,3.5:3.7', when='@:1.3', type=('build', 'link', 'run')) depends_on("python@2.7:2.8,3.5:3.7", when="@:1.3", type=("build", "link", "run"))
depends_on('py-setuptools', type=('build', 'run')) depends_on("py-setuptools", type=("build", "run"))
depends_on('py-future', when='@1.5:', type=('build', 'run')) depends_on("py-future", when="@1.5:", type=("build", "run"))
depends_on('py-future', when='@1.1: ^python@:2', type=('build', 'run')) depends_on("py-future", when="@1.1: ^python@:2", type=("build", "run"))
depends_on('py-pyyaml', type=('build', 'run')) depends_on("py-pyyaml", type=("build", "run"))
depends_on('py-typing', when='@0.4: ^python@:3.4', type=('build', 'run')) depends_on("py-typing", when="@0.4: ^python@:3.4", type=("build", "run"))
depends_on('py-typing-extensions', when='@1.7:', type=('build', 'run')) depends_on("py-typing-extensions", when="@1.7:", type=("build", "run"))
depends_on('py-pybind11@2.6.2', when='@1.8.0:', type=('build', 'link', 'run')) depends_on("py-pybind11@2.6.2", when="@1.8.0:", type=("build", "link", "run"))
depends_on('py-pybind11@2.3.0', when='@1.1.0:1.7', type=('build', 'link', 'run')) depends_on("py-pybind11@2.3.0", when="@1.1.0:1.7", type=("build", "link", "run"))
depends_on('py-pybind11@2.2.4', when='@1.0.0:1.0', type=('build', 'link', 'run')) depends_on("py-pybind11@2.2.4", when="@1.0.0:1.0", type=("build", "link", "run"))
depends_on('py-pybind11@2.2.2', when='@0.4.0:0.4', type=('build', 'link', 'run')) depends_on("py-pybind11@2.2.2", when="@0.4.0:0.4", type=("build", "link", "run"))
depends_on('py-dataclasses', when='@1.7: ^python@3.6.0:3.6', type=('build', 'run')) depends_on("py-dataclasses", when="@1.7: ^python@3.6.0:3.6", type=("build", "run"))
depends_on('py-tqdm', type='run') depends_on("py-tqdm", type="run")
depends_on('py-protobuf', when='@0.4:', type=('build', 'run')) depends_on("py-protobuf", when="@0.4:", type=("build", "run"))
depends_on('protobuf', when='@0.4:') depends_on("protobuf", when="@0.4:")
depends_on('blas') depends_on("blas")
depends_on('lapack') depends_on("lapack")
depends_on('eigen', when='@0.4:') depends_on("eigen", when="@0.4:")
# https://github.com/pytorch/pytorch/issues/60329 # https://github.com/pytorch/pytorch/issues/60329
# depends_on('cpuinfo@2020-12-17', when='@1.8.0:') # depends_on('cpuinfo@2020-12-17', when='@1.8.0:')
# depends_on('cpuinfo@2020-06-11', when='@1.6.0:1.7') # depends_on('cpuinfo@2020-06-11', when='@1.6.0:1.7')
@ -152,30 +165,30 @@ class PyTorch(PythonPackage, CudaPackage):
# depends_on('sleef@3.4.0_2019-07-30', when='@1.6.0:1.7') # depends_on('sleef@3.4.0_2019-07-30', when='@1.6.0:1.7')
# https://github.com/Maratyszcza/FP16/issues/18 # https://github.com/Maratyszcza/FP16/issues/18
# depends_on('fp16@2020-05-14', when='@1.6.0:') # depends_on('fp16@2020-05-14', when='@1.6.0:')
depends_on('pthreadpool@2021-04-13', when='@1.9.0:') depends_on("pthreadpool@2021-04-13", when="@1.9.0:")
depends_on('pthreadpool@2020-10-05', when='@1.8.0:1.8') depends_on("pthreadpool@2020-10-05", when="@1.8.0:1.8")
depends_on('pthreadpool@2020-06-15', when='@1.6.0:1.7') depends_on("pthreadpool@2020-06-15", when="@1.6.0:1.7")
depends_on('psimd@2020-05-17', when='@1.6.0:') depends_on("psimd@2020-05-17", when="@1.6.0:")
depends_on('fxdiv@2020-04-17', when='@1.6.0:') depends_on("fxdiv@2020-04-17", when="@1.6.0:")
depends_on('benchmark', when='@1.6:+test') depends_on("benchmark", when="@1.6:+test")
# Optional dependencies # Optional dependencies
depends_on('cuda@7.5:', when='+cuda', type=('build', 'link', 'run')) depends_on("cuda@7.5:", when="+cuda", type=("build", "link", "run"))
depends_on('cuda@9:', when='@1.1:+cuda', type=('build', 'link', 'run')) depends_on("cuda@9:", when="@1.1:+cuda", type=("build", "link", "run"))
depends_on('cuda@9.2:', when='@1.6:+cuda', type=('build', 'link', 'run')) depends_on("cuda@9.2:", when="@1.6:+cuda", type=("build", "link", "run"))
depends_on('cudnn@6.0:7', when='@:1.0+cudnn') depends_on("cudnn@6.0:7", when="@:1.0+cudnn")
depends_on('cudnn@7.0:7', when='@1.1.0:1.5+cudnn') depends_on("cudnn@7.0:7", when="@1.1.0:1.5+cudnn")
depends_on('cudnn@7.0:', when='@1.6.0:+cudnn') depends_on("cudnn@7.0:", when="@1.6.0:+cudnn")
depends_on('magma', when='+magma') depends_on("magma", when="+magma")
depends_on('nccl', when='+nccl') depends_on("nccl", when="+nccl")
depends_on('numactl', when='+numa') depends_on("numactl", when="+numa")
depends_on('py-numpy', when='+numpy', type=('build', 'run')) depends_on("py-numpy", when="+numpy", type=("build", "run"))
depends_on('llvm-openmp', when='%apple-clang +openmp') depends_on("llvm-openmp", when="%apple-clang +openmp")
depends_on('valgrind', when='+valgrind') depends_on("valgrind", when="+valgrind")
# https://github.com/pytorch/pytorch/issues/60332 # https://github.com/pytorch/pytorch/issues/60332
# depends_on('xnnpack@2021-02-22', when='@1.8.0:+xnnpack') # depends_on('xnnpack@2021-02-22', when='@1.8.0:+xnnpack')
# depends_on('xnnpack@2020-03-23', when='@1.6.0:1.7+xnnpack') # depends_on('xnnpack@2020-03-23', when='@1.6.0:1.7+xnnpack')
depends_on('mpi', when='+mpi') depends_on("mpi", when="+mpi")
# https://github.com/pytorch/pytorch/issues/60270 # https://github.com/pytorch/pytorch/issues/60270
# depends_on('gloo@2021-05-04', when='@1.9.0:+gloo') # depends_on('gloo@2021-05-04', when='@1.9.0:+gloo')
# depends_on('gloo@2020-09-18', when='@1.7.0:1.8+gloo') # depends_on('gloo@2020-09-18', when='@1.7.0:1.8+gloo')
@ -183,31 +196,35 @@ class PyTorch(PythonPackage, CudaPackage):
# https://github.com/pytorch/pytorch/issues/60331 # https://github.com/pytorch/pytorch/issues/60331
# depends_on('onnx@1.8.0_2020-11-03', when='@1.8.0:+onnx_ml') # depends_on('onnx@1.8.0_2020-11-03', when='@1.8.0:+onnx_ml')
# depends_on('onnx@1.7.0_2020-05-31', when='@1.6.0:1.7+onnx_ml') # depends_on('onnx@1.7.0_2020-05-31', when='@1.6.0:1.7+onnx_ml')
depends_on('mkl', when='+mkldnn') depends_on("mkl", when="+mkldnn")
# Test dependencies # Test dependencies
depends_on('py-hypothesis', type='test') depends_on("py-hypothesis", type="test")
depends_on('py-six', type='test') depends_on("py-six", type="test")
depends_on('py-psutil', type='test') depends_on("py-psutil", type="test")
# Fix BLAS being overridden by MKL # Fix BLAS being overridden by MKL
# https://github.com/pytorch/pytorch/issues/60328 # https://github.com/pytorch/pytorch/issues/60328
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch', patch(
sha256='e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9', "https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch",
when='@1.2.0:') sha256="e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9",
when="@1.2.0:",
)
# Fixes build on older systems with glibc <2.12 # Fixes build on older systems with glibc <2.12
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch', patch(
sha256='e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395', "https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch",
when='@1.1.0:1.8.1') sha256="e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395",
when="@1.1.0:1.8.1",
)
# Fixes CMake configuration error when XNNPACK is disabled # Fixes CMake configuration error when XNNPACK is disabled
# https://github.com/pytorch/pytorch/pull/35607 # https://github.com/pytorch/pytorch/pull/35607
# https://github.com/pytorch/pytorch/pull/37865 # https://github.com/pytorch/pytorch/pull/37865
patch('xnnpack.patch', when='@1.5.0:1.5') patch("xnnpack.patch", when="@1.5.0:1.5")
# Fixes build error when ROCm is enabled for pytorch-1.5 release # Fixes build error when ROCm is enabled for pytorch-1.5 release
patch('rocm.patch', when='@1.5.0:1.5+rocm') patch("rocm.patch", when="@1.5.0:1.5+rocm")
# Fixes fatal error: sleef.h: No such file or directory # Fixes fatal error: sleef.h: No such file or directory
# https://github.com/pytorch/pytorch/pull/35359 # https://github.com/pytorch/pytorch/pull/35359
@ -216,47 +233,56 @@ class PyTorch(PythonPackage, CudaPackage):
# Fixes compilation with Clang 9.0.0 and Apple Clang 11.0.3 # Fixes compilation with Clang 9.0.0 and Apple Clang 11.0.3
# https://github.com/pytorch/pytorch/pull/37086 # https://github.com/pytorch/pytorch/pull/37086
patch('https://github.com/pytorch/pytorch/commit/e921cd222a8fbeabf5a3e74e83e0d8dfb01aa8b5.patch', patch(
sha256='17561b16cd2db22f10c0fe1fdcb428aecb0ac3964ba022a41343a6bb8cba7049', "https://github.com/pytorch/pytorch/commit/e921cd222a8fbeabf5a3e74e83e0d8dfb01aa8b5.patch",
when='@1.1:1.5') sha256="17561b16cd2db22f10c0fe1fdcb428aecb0ac3964ba022a41343a6bb8cba7049",
when="@1.1:1.5",
)
# Removes duplicate definition of getCusparseErrorString # Removes duplicate definition of getCusparseErrorString
# https://github.com/pytorch/pytorch/issues/32083 # https://github.com/pytorch/pytorch/issues/32083
patch('cusparseGetErrorString.patch', when='@0.4.1:1.0^cuda@10.1.243:') patch("cusparseGetErrorString.patch", when="@0.4.1:1.0^cuda@10.1.243:")
# Fixes 'FindOpenMP.cmake' # Fixes 'FindOpenMP.cmake'
# to detect openmp settings used by Fujitsu compiler. # to detect openmp settings used by Fujitsu compiler.
patch('detect_omp_of_fujitsu_compiler.patch', when='%fj') patch("detect_omp_of_fujitsu_compiler.patch", when="%fj")
# Fix compilation of +distributed~tensorpipe # Fix compilation of +distributed~tensorpipe
# https://github.com/pytorch/pytorch/issues/68002 # https://github.com/pytorch/pytorch/issues/68002
patch('https://github.com/pytorch/pytorch/commit/c075f0f633fa0136e68f0a455b5b74d7b500865c.patch', patch(
sha256='e69e41b5c171bfb00d1b5d4ee55dd5e4c8975483230274af4ab461acd37e40b8', when='@1.10.0+distributed~tensorpipe') "https://github.com/pytorch/pytorch/commit/c075f0f633fa0136e68f0a455b5b74d7b500865c.patch",
sha256="e69e41b5c171bfb00d1b5d4ee55dd5e4c8975483230274af4ab461acd37e40b8",
when="@1.10.0+distributed~tensorpipe",
)
# Both build and install run cmake/make/make install # Both build and install run cmake/make/make install
# Only run once to speed up build times # Only run once to speed up build times
phases = ['install'] phases = ["install"]
@property @property
def libs(self): def libs(self):
root = join_path(self.prefix, self.spec['python'].package.site_packages_dir, root = join_path(
'torch', 'lib') self.prefix, self.spec["python"].package.site_packages_dir, "torch", "lib"
return find_libraries('libtorch', root) )
return find_libraries("libtorch", root)
@property @property
def headers(self): def headers(self):
root = join_path(self.prefix, self.spec['python'].package.site_packages_dir, root = join_path(
'torch', 'include') self.prefix, self.spec["python"].package.site_packages_dir, "torch", "include"
)
headers = find_all_headers(root) headers = find_all_headers(root)
headers.directories = [root] headers.directories = [root]
return headers return headers
@when('@1.5.0:') @when("@1.5.0:")
def patch(self): def patch(self):
# https://github.com/pytorch/pytorch/issues/52208 # https://github.com/pytorch/pytorch/issues/52208
filter_file('torch_global_deps PROPERTIES LINKER_LANGUAGE C', filter_file(
'torch_global_deps PROPERTIES LINKER_LANGUAGE CXX', "torch_global_deps PROPERTIES LINKER_LANGUAGE C",
'caffe2/CMakeLists.txt') "torch_global_deps PROPERTIES LINKER_LANGUAGE CXX",
"caffe2/CMakeLists.txt",
)
def setup_build_environment(self, env): def setup_build_environment(self, env):
"""Set environment variables used to control the build. """Set environment variables used to control the build.
@ -269,7 +295,8 @@ class PyTorch(PythonPackage, CudaPackage):
most flags defined in ``CMakeLists.txt`` can be specified as most flags defined in ``CMakeLists.txt`` can be specified as
environment variables. environment variables.
""" """
def enable_or_disable(variant, keyword='USE', var=None, newer=False):
def enable_or_disable(variant, keyword="USE", var=None, newer=False):
"""Set environment variable to enable or disable support for a """Set environment variable to enable or disable support for a
particular variant. particular variant.
@ -284,137 +311,135 @@ class PyTorch(PythonPackage, CudaPackage):
# Version 1.1.0 switched from NO_* to USE_* or BUILD_* # Version 1.1.0 switched from NO_* to USE_* or BUILD_*
# But some newer variants have always used USE_* or BUILD_* # But some newer variants have always used USE_* or BUILD_*
if self.spec.satisfies('@1.1:') or newer: if self.spec.satisfies("@1.1:") or newer:
if '+' + variant in self.spec: if "+" + variant in self.spec:
env.set(keyword + '_' + var, 'ON') env.set(keyword + "_" + var, "ON")
else: else:
env.set(keyword + '_' + var, 'OFF') env.set(keyword + "_" + var, "OFF")
else: else:
if '+' + variant in self.spec: if "+" + variant in self.spec:
env.unset('NO_' + var) env.unset("NO_" + var)
else: else:
env.set('NO_' + var, 'ON') env.set("NO_" + var, "ON")
# Build in parallel to speed up build times # Build in parallel to speed up build times
env.set('MAX_JOBS', make_jobs) env.set("MAX_JOBS", make_jobs)
# Spack logs have trouble handling colored output # Spack logs have trouble handling colored output
env.set('COLORIZE_OUTPUT', 'OFF') env.set("COLORIZE_OUTPUT", "OFF")
if self.spec.satisfies('@0.4:'): if self.spec.satisfies("@0.4:"):
enable_or_disable('test', keyword='BUILD') enable_or_disable("test", keyword="BUILD")
if self.spec.satisfies('@1.7:'): if self.spec.satisfies("@1.7:"):
enable_or_disable('caffe2', keyword='BUILD') enable_or_disable("caffe2", keyword="BUILD")
enable_or_disable('cuda') enable_or_disable("cuda")
if '+cuda' in self.spec: if "+cuda" in self.spec:
# cmake/public/cuda.cmake # cmake/public/cuda.cmake
# cmake/Modules_CUDA_fix/upstream/FindCUDA.cmake # cmake/Modules_CUDA_fix/upstream/FindCUDA.cmake
env.unset('CUDA_ROOT') env.unset("CUDA_ROOT")
torch_cuda_arch = ';'.join('{0:.1f}'.format(float(i) / 10.0) for i torch_cuda_arch = ";".join(
in "{0:.1f}".format(float(i) / 10.0) for i in self.spec.variants["cuda_arch"].value
self.spec.variants['cuda_arch'].value) )
env.set('TORCH_CUDA_ARCH_LIST', torch_cuda_arch) env.set("TORCH_CUDA_ARCH_LIST", torch_cuda_arch)
enable_or_disable('rocm') enable_or_disable("rocm")
enable_or_disable('cudnn') enable_or_disable("cudnn")
if '+cudnn' in self.spec: if "+cudnn" in self.spec:
# cmake/Modules_CUDA_fix/FindCUDNN.cmake # cmake/Modules_CUDA_fix/FindCUDNN.cmake
env.set('CUDNN_INCLUDE_DIR', self.spec['cudnn'].prefix.include) env.set("CUDNN_INCLUDE_DIR", self.spec["cudnn"].prefix.include)
env.set('CUDNN_LIBRARY', self.spec['cudnn'].libs[0]) env.set("CUDNN_LIBRARY", self.spec["cudnn"].libs[0])
enable_or_disable('fbgemm') enable_or_disable("fbgemm")
if self.spec.satisfies('@1.8:'): if self.spec.satisfies("@1.8:"):
enable_or_disable('kineto') enable_or_disable("kineto")
enable_or_disable('magma') enable_or_disable("magma")
enable_or_disable('metal') enable_or_disable("metal")
if self.spec.satisfies('@1.10:'): if self.spec.satisfies("@1.10:"):
enable_or_disable('breakpad') enable_or_disable("breakpad")
enable_or_disable('nccl') enable_or_disable("nccl")
if '+nccl' in self.spec: if "+nccl" in self.spec:
env.set('NCCL_LIB_DIR', self.spec['nccl'].libs.directories[0]) env.set("NCCL_LIB_DIR", self.spec["nccl"].libs.directories[0])
env.set('NCCL_INCLUDE_DIR', self.spec['nccl'].prefix.include) env.set("NCCL_INCLUDE_DIR", self.spec["nccl"].prefix.include)
# cmake/External/nnpack.cmake # cmake/External/nnpack.cmake
enable_or_disable('nnpack') enable_or_disable("nnpack")
enable_or_disable('numa') enable_or_disable("numa")
if '+numa' in self.spec: if "+numa" in self.spec:
# cmake/Modules/FindNuma.cmake # cmake/Modules/FindNuma.cmake
env.set('NUMA_ROOT_DIR', self.spec['numactl'].prefix) env.set("NUMA_ROOT_DIR", self.spec["numactl"].prefix)
# cmake/Modules/FindNumPy.cmake # cmake/Modules/FindNumPy.cmake
enable_or_disable('numpy') enable_or_disable("numpy")
# cmake/Modules/FindOpenMP.cmake # cmake/Modules/FindOpenMP.cmake
enable_or_disable('openmp', newer=True) enable_or_disable("openmp", newer=True)
enable_or_disable('qnnpack') enable_or_disable("qnnpack")
if self.spec.satisfies('@1.3:'): if self.spec.satisfies("@1.3:"):
enable_or_disable('qnnpack', var='PYTORCH_QNNPACK') enable_or_disable("qnnpack", var="PYTORCH_QNNPACK")
if self.spec.satisfies('@1.8:'): if self.spec.satisfies("@1.8:"):
enable_or_disable('valgrind') enable_or_disable("valgrind")
if self.spec.satisfies('@1.5:'): if self.spec.satisfies("@1.5:"):
enable_or_disable('xnnpack') enable_or_disable("xnnpack")
enable_or_disable('mkldnn') enable_or_disable("mkldnn")
enable_or_disable('distributed') enable_or_disable("distributed")
enable_or_disable('mpi') enable_or_disable("mpi")
# cmake/Modules/FindGloo.cmake # cmake/Modules/FindGloo.cmake
enable_or_disable('gloo', newer=True) enable_or_disable("gloo", newer=True)
if self.spec.satisfies('@1.6:'): if self.spec.satisfies("@1.6:"):
enable_or_disable('tensorpipe') enable_or_disable("tensorpipe")
if '+onnx_ml' in self.spec: if "+onnx_ml" in self.spec:
env.set('ONNX_ML', 'ON') env.set("ONNX_ML", "ON")
else: else:
env.set('ONNX_ML', 'OFF') env.set("ONNX_ML", "OFF")
if not self.spec.satisfies('@master'): if not self.spec.satisfies("@master"):
env.set('PYTORCH_BUILD_VERSION', self.version) env.set("PYTORCH_BUILD_VERSION", self.version)
env.set('PYTORCH_BUILD_NUMBER', 0) env.set("PYTORCH_BUILD_NUMBER", 0)
# BLAS to be used by Caffe2 # BLAS to be used by Caffe2
# Options defined in cmake/Dependencies.cmake and cmake/Modules/FindBLAS.cmake # Options defined in cmake/Dependencies.cmake and cmake/Modules/FindBLAS.cmake
if self.spec['blas'].name == 'atlas': if self.spec["blas"].name == "atlas":
env.set('BLAS', 'ATLAS') env.set("BLAS", "ATLAS")
env.set('WITH_BLAS', 'atlas') env.set("WITH_BLAS", "atlas")
elif self.spec['blas'].name in ['blis', 'amdblis']: elif self.spec["blas"].name in ["blis", "amdblis"]:
env.set('BLAS', 'BLIS') env.set("BLAS", "BLIS")
env.set('WITH_BLAS', 'blis') env.set("WITH_BLAS", "blis")
elif self.spec['blas'].name == 'eigen': elif self.spec["blas"].name == "eigen":
env.set('BLAS', 'Eigen') env.set("BLAS", "Eigen")
elif self.spec['lapack'].name in ['libflame', 'amdlibflame']: elif self.spec["lapack"].name in ["libflame", "amdlibflame"]:
env.set('BLAS', 'FLAME') env.set("BLAS", "FLAME")
env.set('WITH_BLAS', 'FLAME') env.set("WITH_BLAS", "FLAME")
elif self.spec['blas'].name in [ elif self.spec["blas"].name in ["intel-mkl", "intel-parallel-studio", "intel-oneapi-mkl"]:
'intel-mkl', 'intel-parallel-studio', 'intel-oneapi-mkl']: env.set("BLAS", "MKL")
env.set('BLAS', 'MKL') env.set("WITH_BLAS", "mkl")
env.set('WITH_BLAS', 'mkl') elif self.spec["blas"].name == "openblas":
elif self.spec['blas'].name == 'openblas': env.set("BLAS", "OpenBLAS")
env.set('BLAS', 'OpenBLAS') env.set("WITH_BLAS", "open")
env.set('WITH_BLAS', 'open') elif self.spec["blas"].name == "veclibfort":
elif self.spec['blas'].name == 'veclibfort': env.set("BLAS", "vecLib")
env.set('BLAS', 'vecLib') env.set("WITH_BLAS", "veclib")
env.set('WITH_BLAS', 'veclib')
else: else:
env.set('BLAS', 'Generic') env.set("BLAS", "Generic")
env.set('WITH_BLAS', 'generic') env.set("WITH_BLAS", "generic")
# Don't use vendored third-party libraries when possible # Don't use vendored third-party libraries when possible
env.set('BUILD_CUSTOM_PROTOBUF', 'OFF') env.set("BUILD_CUSTOM_PROTOBUF", "OFF")
env.set('USE_SYSTEM_NCCL', 'ON') env.set("USE_SYSTEM_NCCL", "ON")
env.set('USE_SYSTEM_EIGEN_INSTALL', 'ON') env.set("USE_SYSTEM_EIGEN_INSTALL", "ON")
if self.spec.satisfies('@0.4:'): if self.spec.satisfies("@0.4:"):
env.set('pybind11_DIR', self.spec['py-pybind11'].prefix) env.set("pybind11_DIR", self.spec["py-pybind11"].prefix)
env.set('pybind11_INCLUDE_DIR', env.set("pybind11_INCLUDE_DIR", self.spec["py-pybind11"].prefix.include)
self.spec['py-pybind11'].prefix.include) if self.spec.satisfies("@1.10:"):
if self.spec.satisfies('@1.10:'): env.set("USE_SYSTEM_PYBIND11", "ON")
env.set('USE_SYSTEM_PYBIND11', 'ON')
# https://github.com/pytorch/pytorch/issues/60334 # https://github.com/pytorch/pytorch/issues/60334
# if self.spec.satisfies('@1.8:'): # if self.spec.satisfies('@1.8:'):
# env.set('USE_SYSTEM_SLEEF', 'ON') # env.set('USE_SYSTEM_SLEEF', 'ON')
if self.spec.satisfies('@1.6:'): if self.spec.satisfies("@1.6:"):
# env.set('USE_SYSTEM_LIBS', 'ON') # env.set('USE_SYSTEM_LIBS', 'ON')
# https://github.com/pytorch/pytorch/issues/60329 # https://github.com/pytorch/pytorch/issues/60329
# env.set('USE_SYSTEM_CPUINFO', 'ON') # env.set('USE_SYSTEM_CPUINFO', 'ON')
@ -422,27 +447,26 @@ class PyTorch(PythonPackage, CudaPackage):
# env.set('USE_SYSTEM_GLOO', 'ON') # env.set('USE_SYSTEM_GLOO', 'ON')
# https://github.com/Maratyszcza/FP16/issues/18 # https://github.com/Maratyszcza/FP16/issues/18
# env.set('USE_SYSTEM_FP16', 'ON') # env.set('USE_SYSTEM_FP16', 'ON')
env.set('USE_SYSTEM_PTHREADPOOL', 'ON') env.set("USE_SYSTEM_PTHREADPOOL", "ON")
env.set('USE_SYSTEM_PSIMD', 'ON') env.set("USE_SYSTEM_PSIMD", "ON")
env.set('USE_SYSTEM_FXDIV', 'ON') env.set("USE_SYSTEM_FXDIV", "ON")
env.set('USE_SYSTEM_BENCHMARK', 'ON') env.set("USE_SYSTEM_BENCHMARK", "ON")
# https://github.com/pytorch/pytorch/issues/60331 # https://github.com/pytorch/pytorch/issues/60331
# env.set('USE_SYSTEM_ONNX', 'ON') # env.set('USE_SYSTEM_ONNX', 'ON')
# https://github.com/pytorch/pytorch/issues/60332 # https://github.com/pytorch/pytorch/issues/60332
# env.set('USE_SYSTEM_XNNPACK', 'ON') # env.set('USE_SYSTEM_XNNPACK', 'ON')
@run_before('install') @run_before("install")
def build_amd(self): def build_amd(self):
if '+rocm' in self.spec: if "+rocm" in self.spec:
python(os.path.join('tools', 'amd_build', 'build_amd.py')) python(os.path.join("tools", "amd_build", "build_amd.py"))
@run_after('install') @run_after("install")
@on_package_attributes(run_tests=True) @on_package_attributes(run_tests=True)
def install_test(self): def install_test(self):
with working_dir('test'): with working_dir("test"):
python('run_test.py') python("run_test.py")
# Tests need to be re-added since `phases` was overridden # Tests need to be re-added since `phases` was overridden
run_after('install')( run_after("install")(PythonPackage._run_default_install_time_test_callbacks)
PythonPackage._run_default_install_time_test_callbacks) run_after("install")(PythonPackage.sanity_check_prefix)
run_after('install')(PythonPackage.sanity_check_prefix)

File diff suppressed because it is too large Load diff

View file

@ -30,7 +30,7 @@ class Armcomputelibrary(SConsPackage):
url = "https://github.com/ARM-software/ComputeLibrary/archive/refs/tags/v23.02.zip" url = "https://github.com/ARM-software/ComputeLibrary/archive/refs/tags/v23.02.zip"
git = "https://github.com/ARM-software/ComputeLibrary.git" git = "https://github.com/ARM-software/ComputeLibrary.git"
maintainers = ["annop-w"] maintainers("annop-w")
version("23.02", sha256="bed1b24047ce00155e552204bc3983e86f46775414c554a34a7ece931d67ec62") version("23.02", sha256="bed1b24047ce00155e552204bc3983e86f46775414c554a34a7ece931d67ec62")
version("22.11", sha256="2f70f54d84390625222503ea38650c00c49d4b70bc86a6b9aeeebee9d243865f") version("22.11", sha256="2f70f54d84390625222503ea38650c00c49d4b70bc86a6b9aeeebee9d243865f")

View file

@ -25,7 +25,7 @@ class Babelstream(CMakePackage, CudaPackage, ROCmPackage):
version("4.0", sha256="a9cd39277fb15d977d468435eb9b894f79f468233f0131509aa540ffda4f5953") version("4.0", sha256="a9cd39277fb15d977d468435eb9b894f79f468233f0131509aa540ffda4f5953")
version("main", branch="main") version("main", branch="main")
version("develop", branch="develop") version("develop", branch="develop")
maintainers = ["tomdeakin", "kaanolgu", "tom91136", "robj0nes"] maintainers("tomdeakin", "kaanolgu", "tom91136", "robj0nes")
# Languages # Languages
# Also supported variants are cuda and rocm (for HIP) # Also supported variants are cuda and rocm (for HIP)

View file

@ -14,7 +14,7 @@ class CRaft(AutotoolsPackage):
git = "https://github.com/canonical/raft.git" git = "https://github.com/canonical/raft.git"
url = "https://github.com/canonical/raft/archive/refs/tags/v0.17.1.tar.gz" url = "https://github.com/canonical/raft/archive/refs/tags/v0.17.1.tar.gz"
maintainers = ["mdorier"] maintainers("mdorier")
version("master", branch="master") version("master", branch="master")
version("0.17.1", sha256="e31c7fafbdd5f94913161c5d64341a203364e512524b47295c97a91e83c4198b") version("0.17.1", sha256="e31c7fafbdd5f94913161c5d64341a203364e512524b47295c97a91e83c4198b")

View file

@ -17,7 +17,7 @@ class CbtfArgonavisGui(QMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/" homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis-gui.git" git = "https://github.com/OpenSpeedShop/cbtf-argonavis-gui.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("1.3.0.0", branch="1.3.0.0") version("1.3.0.0", branch="1.3.0.0")

View file

@ -15,7 +15,7 @@ class CbtfArgonavis(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/" homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git" git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1") version("1.9.4.1", branch="1.9.4.1")

View file

@ -18,7 +18,7 @@ class CbtfKrell(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/" homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-krell.git" git = "https://github.com/OpenSpeedShop/cbtf-krell.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1") version("1.9.4.1", branch="1.9.4.1")

View file

@ -13,7 +13,7 @@ class CbtfLanl(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/" homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-lanl.git" git = "https://github.com/OpenSpeedShop/cbtf-lanl.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1") version("1.9.4.1", branch="1.9.4.1")

View file

@ -18,7 +18,7 @@ class Cbtf(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home" homepage = "https://sourceforge.net/p/cbtf/wiki/Home"
git = "https://github.com/OpenSpeedShop/cbtf.git" git = "https://github.com/OpenSpeedShop/cbtf.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1") version("1.9.4.1", branch="1.9.4.1")

View file

@ -17,7 +17,7 @@ class DoubleBatchedFftLibrary(CMakePackage):
url = "https://github.com/intel/double-batched-fft-library/archive/refs/tags/v0.3.6.tar.gz" url = "https://github.com/intel/double-batched-fft-library/archive/refs/tags/v0.3.6.tar.gz"
git = "https://github.com/intel/double-batched-fft-library.git" git = "https://github.com/intel/double-batched-fft-library.git"
maintainers = ["uphoffc"] maintainers("uphoffc")
version("main", branch="main") version("main", branch="main")
version("develop", branch="develop") version("develop", branch="develop")

View file

@ -16,7 +16,7 @@ class Dpmjet(MakefilePackage):
list_url = "https://github.com/DPMJET/DPMJET/tags" list_url = "https://github.com/DPMJET/DPMJET/tags"
git = "https://github.com/DPMJET/DPMJET.git" git = "https://github.com/DPMJET/DPMJET.git"
maintainers = ["wdconinc"] maintainers("wdconinc")
version("19.3.5", sha256="5a546ca20f86abaecda1828eb5b577aee8a532dffb2c5e7244667d5f25777909") version("19.3.5", sha256="5a546ca20f86abaecda1828eb5b577aee8a532dffb2c5e7244667d5f25777909")
version("19.3.4", sha256="646f520aa67ef6355c45cde155a5dd55f7c9d661314358a7668f6ff472f5d5f9") version("19.3.4", sha256="646f520aa67ef6355c45cde155a5dd55f7c9d661314358a7668f6ff472f5d5f9")

View file

@ -16,7 +16,7 @@ class Estarlight(CMakePackage):
list_url = "https://github.com/eic/estarlight/tags" list_url = "https://github.com/eic/estarlight/tags"
git = "https://github.com/eic/estarlight.git" git = "https://github.com/eic/estarlight.git"
maintainers = ["wdconinc"] maintainers("wdconinc")
version("master", branch="master") version("master", branch="master")
version("1.0.1", sha256="b43c1dd3663d8f325f30b17dd7cf4b49f2eb8ceeed7319c5aabebec8676279fd") version("1.0.1", sha256="b43c1dd3663d8f325f30b17dd7cf4b49f2eb8ceeed7319c5aabebec8676279fd")

View file

@ -12,7 +12,7 @@ class Getorganelle(PythonPackage):
homepage = "https://github.com/Kinggerm/GetOrganelle" homepage = "https://github.com/Kinggerm/GetOrganelle"
url = "https://github.com/Kinggerm/GetOrganelle/archive/refs/tags/1.7.5.0.tar.gz" url = "https://github.com/Kinggerm/GetOrganelle/archive/refs/tags/1.7.5.0.tar.gz"
maintainers = ["snehring"] maintainers("snehring")
version("1.7.7.0", sha256="dd351b5cd33688adfcd8bff9794ae0cc0ce01a572dac2bcf6c9d7db77b3e4883") version("1.7.7.0", sha256="dd351b5cd33688adfcd8bff9794ae0cc0ce01a572dac2bcf6c9d7db77b3e4883")
version("1.7.5.0", sha256="c498196737726cb4c0158f23037bf301a069f5028ece729bb4d09c7d915df93d") version("1.7.5.0", sha256="c498196737726cb4c0158f23037bf301a069f5028ece729bb4d09c7d915df93d")

View file

@ -12,7 +12,7 @@ class GsiNcdiag(CMakePackage):
homepage = "https://github.com/NOAA-EMC/GSI-ncdiag" homepage = "https://github.com/NOAA-EMC/GSI-ncdiag"
url = "https://github.com/NOAA-EMC/GSI-ncdiag/archive/refs/tags/v1.0.0.tar.gz" url = "https://github.com/NOAA-EMC/GSI-ncdiag/archive/refs/tags/v1.0.0.tar.gz"
maintainers = ["ulmononian"] maintainers("ulmononian")
version("1.0.0", sha256="7251d6139c2bc1580db5f7f019e10a4c73d188ddd52ccf21ecc9e39d50a6af51") version("1.0.0", sha256="7251d6139c2bc1580db5f7f019e10a4c73d188ddd52ccf21ecc9e39d50a6af51")

View file

@ -14,7 +14,7 @@ class Lcov(MakefilePackage):
homepage = "http://ltp.sourceforge.net/coverage/lcov.php" homepage = "http://ltp.sourceforge.net/coverage/lcov.php"
url = "https://github.com/linux-test-project/lcov/releases/download/v1.14/lcov-1.14.tar.gz" url = "https://github.com/linux-test-project/lcov/releases/download/v1.14/lcov-1.14.tar.gz"
maintainers = ["KineticTheory"] maintainers("KineticTheory")
version("1.16", sha256="987031ad5528c8a746d4b52b380bc1bffe412de1f2b9c2ba5224995668e3240b") version("1.16", sha256="987031ad5528c8a746d4b52b380bc1bffe412de1f2b9c2ba5224995668e3240b")
version("1.15", sha256="c1cda2fa33bec9aa2c2c73c87226cfe97de0831887176b45ee523c5e30f8053a") version("1.15", sha256="c1cda2fa33bec9aa2c2c73c87226cfe97de0831887176b45ee523c5e30f8053a")

View file

@ -13,7 +13,7 @@ class Netcdf95(CMakePackage):
homepage = "https://lguez.github.io/NetCDF95/" homepage = "https://lguez.github.io/NetCDF95/"
git = "https://github.com/lguez/NetCDF95.git" git = "https://github.com/lguez/NetCDF95.git"
maintainers = ["RemiLacroix-IDRIS"] maintainers("RemiLacroix-IDRIS")
version("0.3", tag="v0.3", submodules=True) version("0.3", tag="v0.3", submodules=True)

View file

@ -31,7 +31,7 @@ class OpenspeedshopUtils(CMakePackage):
homepage = "http://www.openspeedshop.org" homepage = "http://www.openspeedshop.org"
git = "https://github.com/OpenSpeedShop/openspeedshop.git" git = "https://github.com/OpenSpeedShop/openspeedshop.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("2.4.2.1", branch="2.4.2.1") version("2.4.2.1", branch="2.4.2.1")

View file

@ -26,7 +26,7 @@ class Openspeedshop(CMakePackage):
homepage = "http://www.openspeedshop.org" homepage = "http://www.openspeedshop.org"
git = "https://github.com/OpenSpeedShop/openspeedshop.git" git = "https://github.com/OpenSpeedShop/openspeedshop.git"
maintainers = ["jgalarowicz"] maintainers("jgalarowicz")
version("develop", branch="master") version("develop", branch="master")
version("2.4.2.1", branch="2.4.2.1") version("2.4.2.1", branch="2.4.2.1")

View file

@ -12,7 +12,7 @@ class PyAmityping(PythonPackage):
homepage = "https://github.com/slac-lcls/amityping" homepage = "https://github.com/slac-lcls/amityping"
url = "https://github.com/slac-lcls/amityping/archive/refs/tags/1.1.12.tar.gz" url = "https://github.com/slac-lcls/amityping/archive/refs/tags/1.1.12.tar.gz"
maintainers = ["valmar"] maintainers("valmar")
version("1.1.12", sha256="e00e7102a53fa6ee343f018669f6b811d703a2da4728b497f80579bf89efbd3c") version("1.1.12", sha256="e00e7102a53fa6ee343f018669f6b811d703a2da4728b497f80579bf89efbd3c")

View file

@ -11,7 +11,7 @@ class PyLclsKrtc(PythonPackage):
pypi = "lcls-krtc/lcls-krtc-0.2.0.tar.gz" pypi = "lcls-krtc/lcls-krtc-0.2.0.tar.gz"
maintainers = ["valmar"] maintainers("valmar")
version("0.2.0", sha256="20e6327d488d23e29135be44504bf7df72e4425a518f4222841efcd2cd2985f9") version("0.2.0", sha256="20e6327d488d23e29135be44504bf7df72e4425a518f4222841efcd2cd2985f9")

View file

@ -13,7 +13,7 @@ class PyMinkowskiengine(PythonPackage, CudaPackage):
homepage = "https://nvidia.github.io/MinkowskiEngine/" homepage = "https://nvidia.github.io/MinkowskiEngine/"
pypi = "MinkowskiEngine/MinkowskiEngine-0.5.4.tar.gz" pypi = "MinkowskiEngine/MinkowskiEngine-0.5.4.tar.gz"
maintainers = ["wdconinc"] maintainers("wdconinc")
version("0.5.4", sha256="b1879c00d0b0b1d30ba622cce239886a7e3c78ee9da1064cdfe2f64c2ab15f94") version("0.5.4", sha256="b1879c00d0b0b1d30ba622cce239886a7e3c78ee9da1064cdfe2f64c2ab15f94")

View file

@ -12,7 +12,7 @@ class PyPyabel(PythonPackage):
homepage = "https://github.com/PyAbel/PyAbel" homepage = "https://github.com/PyAbel/PyAbel"
pypi = "PyAbel/PyAbel-0.9.0.tar.gz" pypi = "PyAbel/PyAbel-0.9.0.tar.gz"
maintainers = ["valmar"] maintainers("valmar")
version("0.9.0", sha256="4052143de9da19be13bb321fb0524090ffc8cdc56e0e990e5d6f557f18109f08") version("0.9.0", sha256="4052143de9da19be13bb321fb0524090ffc8cdc56e0e990e5d6f557f18109f08")

View file

@ -16,7 +16,7 @@ class Rocmlir(CMakePackage):
git = "https://github.com/ROCmSoftwarePlatform/rocMLIR.git" git = "https://github.com/ROCmSoftwarePlatform/rocMLIR.git"
url = "https://github.com/ROCmSoftwarePlatform/rocMLIR/archive/refs/tags/rocm-5.4.3.tar.gz" url = "https://github.com/ROCmSoftwarePlatform/rocMLIR/archive/refs/tags/rocm-5.4.3.tar.gz"
maintainers = ["srekolam"] maintainers("srekolam")
version("5.4.3", sha256="c0ba0f565e1c6614c9e6091a24cbef67b734a29e4a4ed7a8a57dc43f58ed8d53") version("5.4.3", sha256="c0ba0f565e1c6614c9e6091a24cbef67b734a29e4a4ed7a8a57dc43f58ed8d53")
version("5.4.0", sha256="3823f455ee392118c3281e27d45fa0e5381f3c4070eb4e06ba13bc6b34a90a60") version("5.4.0", sha256="3823f455ee392118c3281e27d45fa0e5381f3c4070eb4e06ba13bc6b34a90a60")

View file

@ -14,7 +14,7 @@ class Scorep(AutotoolsPackage):
homepage = "https://www.vi-hps.org/projects/score-p" homepage = "https://www.vi-hps.org/projects/score-p"
url = "https://perftools.pages.jsc.fz-juelich.de/cicd/scorep/tags/scorep-7.1/scorep-7.1.tar.gz" url = "https://perftools.pages.jsc.fz-juelich.de/cicd/scorep/tags/scorep-7.1/scorep-7.1.tar.gz"
maintainers = ["wrwilliams"] maintainers("wrwilliams")
version("8.0", sha256="4c0f34f20999f92ebe6ca1ff706d0846b8ce6cd537ffbedb49dfaef0faa66311") version("8.0", sha256="4c0f34f20999f92ebe6ca1ff706d0846b8ce6cd537ffbedb49dfaef0faa66311")
version("7.1", sha256="98dea497982001fb82da3429ca55669b2917a0858c71abe2cfe7cd113381f1f7") version("7.1", sha256="98dea497982001fb82da3429ca55669b2917a0858c71abe2cfe7cd113381f1f7")

View file

@ -21,7 +21,7 @@ class Tandem(CMakePackage):
version("1.0", tag="v1.0", submodules=True) version("1.0", tag="v1.0", submodules=True)
patch("fix_v1.0_compilation.diff", when="@1.0") patch("fix_v1.0_compilation.diff", when="@1.0")
maintainers = ["dmay23", "Thomas-Ulrich"] maintainers("dmay23", "Thomas-Ulrich")
variant("polynomial_degree", default="2") variant("polynomial_degree", default="2")
variant("domain_dimension", default="2", values=("2", "3"), multi=False) variant("domain_dimension", default="2", values=("2", "3"), multi=False)
variant("min_quadrature_order", default="0") variant("min_quadrature_order", default="0")

View file

@ -22,7 +22,7 @@ class Thrift(Package):
list_url = "http://archive.apache.org/dist/thrift/" list_url = "http://archive.apache.org/dist/thrift/"
list_depth = 1 list_depth = 1
maintainers = ["thomas-bouvier"] maintainers("thomas-bouvier")
version("0.18.1", sha256="04c6f10e5d788ca78e13ee2ef0d2152c7b070c0af55483d6b942e29cff296726") version("0.18.1", sha256="04c6f10e5d788ca78e13ee2ef0d2152c7b070c0af55483d6b942e29cff296726")
version("0.17.0", sha256="b272c1788bb165d99521a2599b31b97fa69e5931d099015d91ae107a0b0cc58f") version("0.17.0", sha256="b272c1788bb165d99521a2599b31b97fa69e5931d099015d91ae107a0b0cc58f")

View file

@ -12,7 +12,7 @@ class Xtcdata(CMakePackage):
homepage = "https://github.com/slac-lcls/lcls2" homepage = "https://github.com/slac-lcls/lcls2"
url = "https://github.com/slac-lcls/lcls2/archive/refs/tags/3.3.37.tar.gz" url = "https://github.com/slac-lcls/lcls2/archive/refs/tags/3.3.37.tar.gz"
maintainers = ["valmar"] maintainers("valmar")
version("3.3.37", sha256="127a5ae44c9272039708bd877849a3af354ce881fde093a2fc6fe0550b698b72") version("3.3.37", sha256="127a5ae44c9272039708bd877849a3af354ce881fde093a2fc6fe0550b698b72")