maintainers: switch from list to directive (#37752)

Tamara Dahlgren 2023-05-17 15:25:57 -07:00, committed by GitHub
parent 125c20bc06
commit dcfcc03497
30 changed files with 1269 additions and 1180 deletions
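
Each of the 30 files makes the same substitution: the maintainers class attribute, a plain list of GitHub handles, becomes a call to a maintainers directive (the hunks shown also carry unrelated quote-style and line-wrapping reformatting). A minimal, self-contained sketch of the pattern; the emulation below is illustrative only, Spack's real directive machinery does more bookkeeping:

# maintainers_sketch.py -- emulate the list -> directive switch
registry = []

def maintainers(*handles):
    """Directive style: record GitHub handles at class-definition time."""
    registry.extend(handles)

class Amdfftw:
    # old style: maintainers = ['amd-toolchain-support']
    maintainers("amd-toolchain-support")  # new style

print(registry)  # ['amd-toolchain-support']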

var/spack/repos/builtin/packages/amdfftw/package.py

@@ -31,194 +31,164 @@ class Amdfftw(FftwBase):
Example: spack install amdfftw precision=float
"""
_name = 'amdfftw'
_name = "amdfftw"
homepage = "https://developer.amd.com/amd-aocl/fftw/"
url = "https://github.com/amd/amd-fftw/archive/3.0.tar.gz"
git = "https://github.com/amd/amd-fftw.git"
maintainers = ['amd-toolchain-support']
maintainers("amd-toolchain-support")
version('3.1', sha256='3e777f3acef13fa1910db097e818b1d0d03a6a36ef41186247c6ab1ab0afc132')
version('3.0.1', sha256='87030c6bbb9c710f0a64f4f306ba6aa91dc4b182bb804c9022b35aef274d1a4c')
version('3.0', sha256='a69deaf45478a59a69f77c4f7e9872967f1cfe996592dd12beb6318f18ea0bcd')
version('2.2', sha256='de9d777236fb290c335860b458131678f75aa0799c641490c644c843f0e246f8')
version("3.1", sha256="3e777f3acef13fa1910db097e818b1d0d03a6a36ef41186247c6ab1ab0afc132")
version("3.0.1", sha256="87030c6bbb9c710f0a64f4f306ba6aa91dc4b182bb804c9022b35aef274d1a4c")
version("3.0", sha256="a69deaf45478a59a69f77c4f7e9872967f1cfe996592dd12beb6318f18ea0bcd")
version("2.2", sha256="de9d777236fb290c335860b458131678f75aa0799c641490c644c843f0e246f8")
variant('shared', default=True,
description='Builds a shared version of the library')
variant('openmp', default=True,
description='Enable OpenMP support')
variant('threads', default=False,
description='Enable SMP threads support')
variant('debug', default=False,
description='Builds a debug version of the library')
variant("shared", default=True, description="Builds a shared version of the library")
variant("openmp", default=True, description="Enable OpenMP support")
variant("threads", default=False, description="Enable SMP threads support")
variant("debug", default=False, description="Builds a debug version of the library")
variant(
'amd-fast-planner',
"amd-fast-planner",
default=False,
description='Option to reduce the planning time without much '
'tradeoff in the performance. It is supported for '
'float and double precisions only.')
description="Option to reduce the planning time without much "
"tradeoff in the performance. It is supported for "
"float and double precisions only.",
)
variant("amd-top-n-planner", default=False, description="Build with amd-top-n-planner support")
variant(
'amd-top-n-planner',
default=False,
description='Build with amd-top-n-planner support')
variant(
'amd-mpi-vader-limit',
default=False,
description='Build with amd-mpi-vader-limit support')
variant(
'static',
default=False,
description='Build with static support')
variant(
'amd-trans',
default=False,
description='Build with amd-trans support')
variant(
'amd-app-opt',
default=False,
description='Build with amd-app-opt support')
"amd-mpi-vader-limit", default=False, description="Build with amd-mpi-vader-limit support"
)
variant("static", default=False, description="Build with static suppport")
variant("amd-trans", default=False, description="Build with amd-trans suppport")
variant("amd-app-opt", default=False, description="Build with amd-app-opt suppport")
depends_on('texinfo')
depends_on("texinfo")
provides('fftw-api@3', when='@2:')
provides("fftw-api@3", when="@2:")
conflicts(
'precision=quad',
when='@2.2 %aocc',
msg='Quad precision is not supported by AOCC clang version 2.2')
"precision=quad",
when="@2.2 %aocc",
msg="Quad precision is not supported by AOCC clang version 2.2",
)
conflicts(
'+debug',
when='@2.2 %aocc',
msg='debug mode is not supported by AOCC clang version 2.2')
"+debug", when="@2.2 %aocc", msg="debug mode is not supported by AOCC clang version 2.2"
)
conflicts("%gcc@:7.2", when="@2.2:", msg="GCC version above 7.2 is required for AMDFFTW")
conflicts(
'%gcc@:7.2',
when='@2.2:',
msg='GCC version above 7.2 is required for AMDFFTW')
"+amd-fast-planner", when="+mpi", msg="mpi thread is not supported with amd-fast-planner"
)
conflicts(
'+amd-fast-planner',
when='+mpi',
msg='mpi thread is not supported with amd-fast-planner')
"+amd-fast-planner", when="@2.2", msg="amd-fast-planner is supported from 3.0 onwards"
)
conflicts(
'+amd-fast-planner',
when='@2.2',
msg='amd-fast-planner is supported from 3.0 onwards')
"+amd-fast-planner",
when="precision=quad",
msg="Quad precision is not supported with amd-fast-planner",
)
conflicts(
'+amd-fast-planner',
when='precision=quad',
msg='Quad precision is not supported with amd-fast-planner')
"+amd-fast-planner",
when="precision=long_double",
msg="long_double precision is not supported with amd-fast-planner",
)
conflicts(
'+amd-fast-planner',
when='precision=long_double',
msg='long_double precision is not supported with amd-fast-planner')
"+amd-top-n-planner",
when="@:3.0.0",
msg="amd-top-n-planner is supported from 3.0.1 onwards",
)
conflicts(
'+amd-top-n-planner',
when='@:3.0.0',
msg='amd-top-n-planner is supported from 3.0.1 onwards')
"+amd-top-n-planner",
when="precision=long_double",
msg="long_double precision is not supported with amd-top-n-planner",
)
conflicts(
'+amd-top-n-planner',
when='precision=long_double',
msg='long_double precision is not supported with amd-top-n-planner')
"+amd-top-n-planner",
when="precision=quad",
msg="Quad precision is not supported with amd-top-n-planner",
)
conflicts(
'+amd-top-n-planner',
when='precision=quad',
msg='Quad precision is not supported with amd-top-n-planner')
"+amd-top-n-planner",
when="+amd-fast-planner",
msg="amd-top-n-planner cannot be used with amd-fast-planner",
)
conflicts(
'+amd-top-n-planner',
when='+amd-fast-planner',
msg='amd-top-n-planner cannot be used with amd-fast-planner')
"+amd-top-n-planner", when="+threads", msg="amd-top-n-planner works only for single thread"
)
conflicts(
'+amd-top-n-planner',
when='+threads',
msg='amd-top-n-planner works only for single thread')
"+amd-top-n-planner", when="+mpi", msg="mpi thread is not supported with amd-top-n-planner"
)
conflicts(
'+amd-top-n-planner',
when='+mpi',
msg='mpi thread is not supported with amd-top-n-planner')
"+amd-top-n-planner",
when="+openmp",
msg="openmp thread is not supported with amd-top-n-planner",
)
conflicts(
'+amd-top-n-planner',
when='+openmp',
msg='openmp thread is not supported with amd-top-n-planner')
"+amd-mpi-vader-limit",
when="@:3.0.0",
msg="amd-mpi-vader-limit is supported from 3.0.1 onwards",
)
conflicts(
'+amd-mpi-vader-limit',
when='@:3.0.0',
msg='amd-mpi-vader-limit is supported from 3.0.1 onwards')
"+amd-mpi-vader-limit",
when="precision=quad",
msg="Quad precision is not supported with amd-mpi-vader-limit",
)
conflicts("+amd-trans", when="+threads", msg="amd-trans works only for single thread")
conflicts("+amd-trans", when="+mpi", msg="mpi thread is not supported with amd-trans")
conflicts("+amd-trans", when="+openmp", msg="openmp thread is not supported with amd-trans")
conflicts(
'+amd-mpi-vader-limit',
when='precision=quad',
msg='Quad precision is not supported with amd-mpi-vader-limit')
"+amd-trans",
when="precision=long_double",
msg="long_double precision is not supported with amd-trans",
)
conflicts(
'+amd-trans',
when='+threads',
msg='amd-trans works only for single thread')
"+amd-trans", when="precision=quad", msg="Quad precision is not supported with amd-trans"
)
conflicts("+amd-app-opt", when="@:3.0.1", msg="amd-app-opt is supported from 3.1 onwards")
conflicts("+amd-app-opt", when="+mpi", msg="mpi thread is not supported with amd-app-opt")
conflicts(
'+amd-trans',
when='+mpi',
msg='mpi thread is not supported with amd-trans')
"+amd-app-opt",
when="precision=long_double",
msg="long_double precision is not supported with amd-app-opt",
)
conflicts(
'+amd-trans',
when='+openmp',
msg='openmp thread is not supported with amd-trans')
conflicts(
'+amd-trans',
when='precision=long_double',
msg='long_double precision is not supported with amd-trans')
conflicts(
'+amd-trans',
when='precision=quad',
msg='Quad precision is not supported with amd-trans')
conflicts(
'+amd-app-opt',
when='@:3.0.1',
msg='amd-app-opt is supported from 3.1 onwards')
conflicts(
'+amd-app-opt',
when='+mpi',
msg='mpi thread is not supported with amd-app-opt')
conflicts(
'+amd-app-opt',
when='precision=long_double',
msg='long_double precision is not supported with amd-app-opt')
conflicts(
'+amd-app-opt',
when='precision=quad',
msg='Quad precision is not supported with amd-app-opt')
"+amd-app-opt",
when="precision=quad",
msg="Quad precision is not supported with amd-app-opt",
)
def configure(self, spec, prefix):
"""Configure function"""
# Base options
options = [
'--prefix={0}'.format(prefix),
'--enable-amd-opt'
]
options = ["--prefix={0}".format(prefix), "--enable-amd-opt"]
# Check if compiler is AOCC
if '%aocc' in spec:
options.append('CC={0}'.format(os.path.basename(spack_cc)))
options.append('FC={0}'.format(os.path.basename(spack_fc)))
options.append('F77={0}'.format(os.path.basename(spack_fc)))
if "%aocc" in spec:
options.append("CC={0}".format(os.path.basename(spack_cc)))
options.append("FC={0}".format(os.path.basename(spack_fc)))
options.append("F77={0}".format(os.path.basename(spack_fc)))
if '+debug' in spec:
options.append('--enable-debug')
if "+debug" in spec:
options.append("--enable-debug")
if '+mpi' in spec:
options.append('--enable-mpi')
options.append('--enable-amd-mpifft')
if "+mpi" in spec:
options.append("--enable-mpi")
options.append("--enable-amd-mpifft")
else:
options.append('--disable-mpi')
options.append('--disable-amd-mpifft')
options.append("--disable-mpi")
options.append("--disable-amd-mpifft")
options.extend(self.enable_or_disable('shared'))
options.extend(self.enable_or_disable('openmp'))
options.extend(self.enable_or_disable('threads'))
options.extend(self.enable_or_disable('amd-fast-planner'))
options.extend(self.enable_or_disable('amd-top-n-planner'))
options.extend(self.enable_or_disable('amd-mpi-vader-limit'))
options.extend(self.enable_or_disable('static'))
options.extend(self.enable_or_disable('amd-trans'))
options.extend(self.enable_or_disable('amd-app-opt'))
options.extend(self.enable_or_disable("shared"))
options.extend(self.enable_or_disable("openmp"))
options.extend(self.enable_or_disable("threads"))
options.extend(self.enable_or_disable("amd-fast-planner"))
options.extend(self.enable_or_disable("amd-top-n-planner"))
options.extend(self.enable_or_disable("amd-mpi-vader-limit"))
options.extend(self.enable_or_disable("static"))
options.extend(self.enable_or_disable("amd-trans"))
options.extend(self.enable_or_disable("amd-app-opt"))
if not self.compiler.f77 or not self.compiler.fc:
options.append('--disable-fortran')
options.append("--disable-fortran")
# Cross compilation is supported in amd-fftw by making use of the target
# variable to set the AMD_ARCH configure option.
@@ -226,17 +196,16 @@ class Amdfftw(FftwBase):
# use target variable to set appropriate -march option in AMD_ARCH.
arch = spec.architecture
options.append(
'AMD_ARCH={0}'.format(
arch.target.optimization_flags(
spec.compiler).split('=')[-1]))
"AMD_ARCH={0}".format(arch.target.optimization_flags(spec.compiler).split("=")[-1])
)
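# For illustration (hypothetical values): on a zen2 target
# optimization_flags() may return "-march=znver2", so the
# split("=")[-1] above yields AMD_ARCH=znver2.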
# Specific SIMD support.
# float and double precisions are supported
simd_features = ['sse2', 'avx', 'avx2']
simd_features = ["sse2", "avx", "avx2"]
simd_options = []
for feature in simd_features:
msg = '--enable-{0}' if feature in spec.target else '--disable-{0}'
msg = "--enable-{0}" if feature in spec.target else "--disable-{0}"
simd_options.append(msg.format(feature))
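# For example, if the target supports all three features this yields
# ["--enable-sse2", "--enable-avx", "--enable-avx2"]; unsupported
# features get the matching --disable-* flag instead.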
# When enabling configure option "--enable-amd-opt", do not use the
@@ -246,20 +215,19 @@ class Amdfftw(FftwBase):
# Double is the default precision, for all the others we need
# to enable the corresponding option.
enable_precision = {
'float': ['--enable-float'],
'double': None,
'long_double': ['--enable-long-double'],
'quad': ['--enable-quad-precision']
"float": ["--enable-float"],
"double": None,
"long_double": ["--enable-long-double"],
"quad": ["--enable-quad-precision"],
}
# Different precisions must be configured and compiled one at a time
configure = Executable('../configure')
configure = Executable("../configure")
for precision in self.selected_precisions:
opts = (enable_precision[precision] or []) + options[:]
# SIMD optimizations are available only for float and double
if precision in ('float', 'double'):
if precision in ("float", "double"):
opts += simd_options
with working_dir(precision, create=True):
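
For context on the enable_or_disable calls above: Spack's AutotoolsPackage helper maps a boolean variant onto the matching --enable-X/--disable-X configure flag. A rough standalone approximation (assumed behavior; the real helper also supports multi-valued variants and custom activation values):

def enable_or_disable(name, variants):
    """Sketch: turn a boolean variant into an autotools flag list."""
    state = "enable" if variants.get(name, False) else "disable"
    return ["--{0}-{1}".format(state, name)]

print(enable_or_disable("shared", {"shared": True}))  # ['--enable-shared']
print(enable_or_disable("amd-trans", {}))             # ['--disable-amd-trans']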

var/spack/repos/builtin/packages/llvm/package.py

@@ -26,11 +26,11 @@ class Llvm(CMakePackage, CudaPackage):
url = "https://github.com/llvm/llvm-project/archive/llvmorg-7.1.0.tar.gz"
list_url = "https://releases.llvm.org/download.html"
git = "https://github.com/llvm/llvm-project"
maintainers = ['trws', 'haampie']
maintainers("trws", "haampie")
tags = ['e4s']
tags = ["e4s"]
generator = 'Ninja'
generator = "Ninja"
family = "compiler" # Used by lmod
@@ -80,13 +80,12 @@ class Llvm(CMakePackage, CudaPackage):
# to save space, build with `build_type=Release`.
variant(
"clang",
default=True,
description="Build the LLVM C/C++/Objective-C compiler frontend",
"clang", default=True, description="Build the LLVM C/C++/Objective-C compiler frontend"
)
variant(
"flang",
default=False, when='@11: +clang',
default=False,
when="@11: +clang",
description="Build the LLVM Fortran compiler frontend "
"(experimental - parser only, needs GCC)",
)
@@ -95,27 +94,23 @@ class Llvm(CMakePackage, CudaPackage):
default=False,
description="Include debugging code in OpenMP runtime libraries",
)
variant("lldb", default=True, when='+clang', description="Build the LLVM debugger")
variant("lldb", default=True, when="+clang", description="Build the LLVM debugger")
variant("lld", default=True, description="Build the LLVM linker")
variant("mlir", default=False, when='@10:', description="Build with MLIR support")
variant("mlir", default=False, when="@10:", description="Build with MLIR support")
variant(
"internal_unwind",
default=True, when='+clang',
description="Build the libcxxabi libunwind",
"internal_unwind", default=True, when="+clang", description="Build the libcxxabi libunwind"
)
variant(
"polly",
default=True,
description="Build the LLVM polyhedral optimization plugin, "
"only builds for 3.7.0+",
description="Build the LLVM polyhedral optimization plugin, " "only builds for 3.7.0+",
)
variant(
"libcxx",
default=True, when='+clang',
description="Build the LLVM C++ standard library",
"libcxx", default=True, when="+clang", description="Build the LLVM C++ standard library"
)
variant(
"compiler-rt", when='+clang',
"compiler-rt",
when="+clang",
default=True,
description="Build LLVM compiler runtime, including sanitizers",
)
@@ -124,11 +119,7 @@ class Llvm(CMakePackage, CudaPackage):
default=(sys.platform != "darwin"),
description="Add support for LTO with the gold linker plugin",
)
variant(
"split_dwarf",
default=False,
description="Build with split dwarf information",
)
variant("split_dwarf", default=False, description="Build with split dwarf information")
variant(
"llvm_dylib",
default=True,
@@ -136,18 +127,40 @@ class Llvm(CMakePackage, CudaPackage):
)
variant(
"link_llvm_dylib",
default=False, when='+llvm_dylib',
default=False,
when="+llvm_dylib",
description="Link LLVM tools against the LLVM shared library",
)
variant(
"targets",
default="none",
description=("What targets to build. Spack's target family is always added "
"(e.g. X86 is automatically enabled when targeting znver2)."),
values=("all", "none", "aarch64", "amdgpu", "arm", "avr", "bpf", "cppbackend",
"hexagon", "lanai", "mips", "msp430", "nvptx", "powerpc", "riscv",
"sparc", "systemz", "webassembly", "x86", "xcore"),
multi=True
description=(
"What targets to build. Spack's target family is always added "
"(e.g. X86 is automatically enabled when targeting znver2)."
),
values=(
"all",
"none",
"aarch64",
"amdgpu",
"arm",
"avr",
"bpf",
"cppbackend",
"hexagon",
"lanai",
"mips",
"msp430",
"nvptx",
"powerpc",
"riscv",
"sparc",
"systemz",
"webassembly",
"x86",
"xcore",
),
multi=True,
)
variant(
"build_type",
@@ -157,51 +170,52 @@ class Llvm(CMakePackage, CudaPackage):
)
variant(
"omp_tsan",
default=False, when='@6:',
default=False,
when="@6:",
description="Build with OpenMP capable thread sanitizer",
)
variant(
"omp_as_runtime",
default=True,
when='+clang @12:',
when="+clang @12:",
description="Build OpenMP runtime via ENABLE_RUNTIME by just-built Clang",
)
variant('code_signing', default=False,
when='+lldb platform=darwin',
description="Enable code-signing on macOS")
variant("python", default=False, description="Install python bindings")
variant('version_suffix', default='none', description="Add a symbol suffix")
variant(
'shlib_symbol_version',
default='none',
"code_signing",
default=False,
when="+lldb platform=darwin",
description="Enable code-signing on macOS",
)
variant("python", default=False, description="Install python bindings")
variant("version_suffix", default="none", description="Add a symbol suffix")
variant(
"shlib_symbol_version",
default="none",
description="Add shared library symbol version",
when='@13:'
when="@13:",
)
variant(
'z3',
default=False,
when='+clang @8:',
description='Use Z3 for the clang static analyzer'
"z3", default=False, when="+clang @8:", description="Use Z3 for the clang static analyzer"
)
provides('libllvm@14', when='@14.0.0:14')
provides('libllvm@13', when='@13.0.0:13')
provides('libllvm@12', when='@12.0.0:12')
provides('libllvm@11', when='@11.0.0:11')
provides('libllvm@10', when='@10.0.0:10')
provides('libllvm@9', when='@9.0.0:9')
provides('libllvm@8', when='@8.0.0:8')
provides('libllvm@7', when='@7.0.0:7')
provides('libllvm@6', when='@6.0.0:6')
provides('libllvm@5', when='@5.0.0:5')
provides('libllvm@4', when='@4.0.0:4')
provides('libllvm@3', when='@3.0.0:3')
provides("libllvm@14", when="@14.0.0:14")
provides("libllvm@13", when="@13.0.0:13")
provides("libllvm@12", when="@12.0.0:12")
provides("libllvm@11", when="@11.0.0:11")
provides("libllvm@10", when="@10.0.0:10")
provides("libllvm@9", when="@9.0.0:9")
provides("libllvm@8", when="@8.0.0:8")
provides("libllvm@7", when="@7.0.0:7")
provides("libllvm@6", when="@6.0.0:6")
provides("libllvm@5", when="@5.0.0:5")
provides("libllvm@4", when="@4.0.0:4")
provides("libllvm@3", when="@3.0.0:3")
extends("python", when="+python")
# Build dependency
depends_on("cmake@3.4.3:", type="build")
depends_on('cmake@3.13.4:', type='build', when='@12:')
depends_on("cmake@3.13.4:", type="build", when="@12:")
depends_on("ninja", type="build")
depends_on("python@2.7:2.8", when="@:4 ~python", type="build")
depends_on("python", when="@5: ~python", type="build")
@@ -242,7 +256,7 @@ class Llvm(CMakePackage, CudaPackage):
# clang/lib: a lambda parameter cannot shadow an explicitly captured entity
conflicts("%clang@8:", when="@:4")
# Internal compiler error on gcc 8.4 on aarch64 https://bugzilla.redhat.com/show_bug.cgi?id=1958295
conflicts('%gcc@8.4:8.4.9', when='@12: target=aarch64:')
conflicts("%gcc@8.4:8.4.9", when="@12: target=aarch64:")
# When these versions are concretized, but not explicitly with +libcxx, these
# conflicts will enable clingo to set ~libcxx, making the build successful:
@@ -257,12 +271,12 @@ class Llvm(CMakePackage, CudaPackage):
conflicts("%apple-clang@:11", when="@13:+libcxx")
# libcxx-4 and compiler-rt-4 fail to build with "newer" clang and gcc versions:
conflicts('%gcc@7:', when='@:4+libcxx')
conflicts('%clang@6:', when='@:4+libcxx')
conflicts('%apple-clang@6:', when='@:4+libcxx')
conflicts('%gcc@7:', when='@:4+compiler-rt')
conflicts('%clang@6:', when='@:4+compiler-rt')
conflicts('%apple-clang@6:', when='@:4+compiler-rt')
conflicts("%gcc@7:", when="@:4+libcxx")
conflicts("%clang@6:", when="@:4+libcxx")
conflicts("%apple-clang@6:", when="@:4+libcxx")
conflicts("%gcc@7:", when="@:4+compiler-rt")
conflicts("%clang@6:", when="@:4+compiler-rt")
conflicts("%apple-clang@6:", when="@:4+compiler-rt")
# cuda_arch value must be specified
conflicts("cuda_arch=none", when="+cuda", msg="A value for cuda_arch must be specified.")
@@ -270,27 +284,27 @@ class Llvm(CMakePackage, CudaPackage):
# LLVM bug https://bugs.llvm.org/show_bug.cgi?id=48234
# CMake bug: https://gitlab.kitware.com/cmake/cmake/-/issues/21469
# Fixed in upstream versions of both
conflicts('^cmake@3.19.0', when='@6:11.0.0')
conflicts("^cmake@3.19.0", when="@6:11.0.0")
# Github issue #4986
patch("llvm_gcc7.patch", when="@4.0.0:4.0.1+lldb %gcc@7.0:")
# sys/ustat.h has been removed in favour of statfs from glibc-2.28. Use fixed sizes:
patch('llvm5-sanitizer-ustat.patch', when="@4:6.0.0+compiler-rt")
patch("llvm5-sanitizer-ustat.patch", when="@4:6.0.0+compiler-rt")
# Fix lld templates: https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=230463
patch('llvm4-lld-ELF-Symbols.patch', when="@4+lld%clang@6:")
patch('llvm5-lld-ELF-Symbols.patch', when="@5+lld%clang@7:")
patch("llvm4-lld-ELF-Symbols.patch", when="@4+lld%clang@6:")
patch("llvm5-lld-ELF-Symbols.patch", when="@5+lld%clang@7:")
# Fix missing std:size_t in 'llvm@4:5' when built with '%clang@7:'
patch('xray_buffer_queue-cstddef.patch', when="@4:5+compiler-rt%clang@7:")
patch("xray_buffer_queue-cstddef.patch", when="@4:5+compiler-rt%clang@7:")
# https://github.com/llvm/llvm-project/commit/947f9692440836dcb8d88b74b69dd379d85974ce
patch('sanitizer-ipc_perm_mode.patch', when="@5:7+compiler-rt%clang@11:")
patch('sanitizer-ipc_perm_mode.patch', when="@5:9+compiler-rt%gcc@9:")
patch("sanitizer-ipc_perm_mode.patch", when="@5:7+compiler-rt%clang@11:")
patch("sanitizer-ipc_perm_mode.patch", when="@5:9+compiler-rt%gcc@9:")
# github.com/spack/spack/issues/24270: MicrosoftDemangle for %gcc@10: and %clang@13:
patch('missing-includes.patch', when='@8')
patch("missing-includes.patch", when="@8")
# Backport from llvm master + additional fix
# see https://bugs.llvm.org/show_bug.cgi?id=39696
@@ -315,33 +329,33 @@ class Llvm(CMakePackage, CudaPackage):
patch("llvm_python_path.patch", when="@:11")
# Workaround for issue https://github.com/spack/spack/issues/18197
patch('llvm7_intel.patch', when='@7 %intel@18.0.2,19.0.0:19.1.99')
patch("llvm7_intel.patch", when="@7 %intel@18.0.2,19.0.0:19.1.99")
# Remove cyclades support to build against newer kernel headers
# https://reviews.llvm.org/D102059
patch('no_cyclades.patch', when='@10:12.0.0')
patch('no_cyclades9.patch', when='@6:9')
patch("no_cyclades.patch", when="@10:12.0.0")
patch("no_cyclades9.patch", when="@6:9")
patch('llvm-gcc11.patch', when='@9:11%gcc@11:')
patch("llvm-gcc11.patch", when="@9:11%gcc@11:")
# add -lpthread to build OpenMP libraries with Fujitsu compiler
patch('llvm12-thread.patch', when='@12 %fj')
patch('llvm13-thread.patch', when='@13 %fj')
patch("llvm12-thread.patch", when="@12 %fj")
patch("llvm13-thread.patch", when="@13 %fj")
# avoid build failures with the Fujitsu compiler
patch('llvm13-fujitsu.patch', when='@13 %fj')
patch("llvm13-fujitsu.patch", when="@13 %fj")
# patch for missing hwloc.h include for libompd
patch('llvm14-hwloc-ompd.patch', when='@14')
patch("llvm14-hwloc-ompd.patch", when="@14")
# make libflags a list in openmp subproject when ~omp_as_runtime
patch('libomp-libflags-as-list.patch', when='@3.7:')
patch("libomp-libflags-as-list.patch", when="@3.7:")
# The functions and attributes below implement external package
# detection for LLVM. See:
#
# https://spack.readthedocs.io/en/latest/packaging_guide.html#making-a-package-discoverable-with-spack-external-find
executables = ['clang', 'flang', 'ld.lld', 'lldb']
executables = ["clang", "flang", "ld.lld", "lldb"]
@classmethod
def filter_detected_exes(cls, prefix, exes_in_prefix):
@@ -351,7 +365,7 @@ class Llvm(CMakePackage, CudaPackage):
# on some port and would hang Spack during detection.
# clang-cl and clang-cpp are dev tools that we don't
# need to test
if any(x in exe for x in ('vscode', 'cpp', '-cl', '-gpu')):
if any(x in exe for x in ("vscode", "cpp", "-cl", "-gpu")):
continue
result.append(exe)
return result
@@ -360,20 +374,20 @@ class Llvm(CMakePackage, CudaPackage):
def determine_version(cls, exe):
version_regex = re.compile(
# Normal clang compiler versions are left as-is
r'clang version ([^ )\n]+)-svn[~.\w\d-]*|'
r"clang version ([^ )\n]+)-svn[~.\w\d-]*|"
# Don't include hyphenated patch numbers in the version
# (see https://github.com/spack/spack/pull/14365 for details)
r'clang version ([^ )\n]+?)-[~.\w\d-]*|'
r'clang version ([^ )\n]+)|'
r"clang version ([^ )\n]+?)-[~.\w\d-]*|"
r"clang version ([^ )\n]+)|"
# LLDB
r'lldb version ([^ )\n]+)|'
r"lldb version ([^ )\n]+)|"
# LLD
r'LLD ([^ )\n]+) \(compatible with GNU linkers\)'
r"LLD ([^ )\n]+) \(compatible with GNU linkers\)"
)
try:
compiler = Executable(exe)
output = compiler('--version', output=str, error=str)
if 'Apple' in output:
output = compiler("--version", output=str, error=str)
if "Apple" in output:
return None
match = version_regex.search(output)
if match:
@@ -387,38 +401,39 @@ class Llvm(CMakePackage, CudaPackage):
@classmethod
def determine_variants(cls, exes, version_str):
variants, compilers = ['+clang'], {}
variants, compilers = ["+clang"], {}
lld_found, lldb_found = False, False
for exe in exes:
if 'clang++' in exe:
compilers['cxx'] = exe
elif 'clang' in exe:
compilers['c'] = exe
elif 'flang' in exe:
variants.append('+flang')
compilers['fc'] = exe
compilers['f77'] = exe
elif 'ld.lld' in exe:
if "clang++" in exe:
compilers["cxx"] = exe
elif "clang" in exe:
compilers["c"] = exe
elif "flang" in exe:
variants.append("+flang")
compilers["fc"] = exe
compilers["f77"] = exe
elif "ld.lld" in exe:
lld_found = True
compilers['ld'] = exe
elif 'lldb' in exe:
compilers["ld"] = exe
elif "lldb" in exe:
lldb_found = True
compilers['lldb'] = exe
compilers["lldb"] = exe
variants.append('+lld' if lld_found else '~lld')
variants.append('+lldb' if lldb_found else '~lldb')
variants.append("+lld" if lld_found else "~lld")
variants.append("+lldb" if lldb_found else "~lldb")
return ''.join(variants), {'compilers': compilers}
return "".join(variants), {"compilers": compilers}
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
# For LLVM 'compilers' is a mandatory attribute
msg = ('the extra attribute "compilers" must be set for '
'the detected spec "{0}"'.format(spec))
assert 'compilers' in extra_attributes, msg
compilers = extra_attributes['compilers']
for key in ('c', 'cxx'):
msg = '{0} compiler not found for {1}'
msg = 'the extra attribute "compilers" must be set for ' 'the detected spec "{0}"'.format(
spec
)
assert "compilers" in extra_attributes, msg
compilers = extra_attributes["compilers"]
for key in ("c", "cxx"):
msg = "{0} compiler not found for {1}"
assert key in compilers, msg.format(key, spec)
@property
@@ -426,10 +441,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve C compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('c', None)
return self.spec.extra_attributes["compilers"].get("c", None)
result = None
if '+clang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang')
if "+clang" in self.spec:
result = os.path.join(self.spec.prefix.bin, "clang")
return result
@property
@@ -437,10 +452,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve C++ compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('cxx', None)
return self.spec.extra_attributes["compilers"].get("cxx", None)
result = None
if '+clang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang++')
if "+clang" in self.spec:
result = os.path.join(self.spec.prefix.bin, "clang++")
return result
@property
@ -448,10 +463,10 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve Fortran compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('fc', None)
return self.spec.extra_attributes["compilers"].get("fc", None)
result = None
if '+flang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang')
if "+flang" in self.spec:
result = os.path.join(self.spec.prefix.bin, "flang")
return result
@property
@@ -459,27 +474,25 @@ class Llvm(CMakePackage, CudaPackage):
msg = "cannot retrieve Fortran 77 compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('f77', None)
return self.spec.extra_attributes["compilers"].get("f77", None)
result = None
if '+flang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang')
if "+flang" in self.spec:
result = os.path.join(self.spec.prefix.bin, "flang")
return result
@property
def libs(self):
return LibraryList(self.llvm_config("--libfiles", "all",
result="list"))
return LibraryList(self.llvm_config("--libfiles", "all", result="list"))
@run_before('cmake')
@run_before("cmake")
def codesign_check(self):
if self.spec.satisfies("+code_signing"):
codesign = which('codesign')
mkdir('tmp')
llvm_check_file = join_path('tmp', 'llvm_check')
copy('/usr/bin/false', llvm_check_file)
codesign = which("codesign")
mkdir("tmp")
llvm_check_file = join_path("tmp", "llvm_check")
copy("/usr/bin/false", llvm_check_file)
try:
codesign('-f', '-s', 'lldb_codesign', '--dryrun',
llvm_check_file)
codesign("-f", "-s", "lldb_codesign", "--dryrun", llvm_check_file)
except ProcessError:
# Newer LLVM versions have a simple script that sets up
@@ -489,32 +502,32 @@ class Llvm(CMakePackage, CudaPackage):
setup()
except Exception:
raise RuntimeError(
'spack was unable to either find or set up '
'code-signing on your system. Please refer to '
'https://lldb.llvm.org/resources/build.html#'
'code-signing-on-macos for details on how to '
'create this identity.'
"spack was unable to either find or set up "
"code-signing on your system. Please refer to "
"https://lldb.llvm.org/resources/build.html#"
"code-signing-on-macos for details on how to "
"create this identity."
)
def flag_handler(self, name, flags):
if name == 'cxxflags':
if name == "cxxflags":
flags.append(self.compiler.cxx11_flag)
return (None, flags, None)
elif name == 'ldflags' and self.spec.satisfies('%intel'):
flags.append('-shared-intel')
elif name == "ldflags" and self.spec.satisfies("%intel"):
flags.append("-shared-intel")
return (None, flags, None)
return (flags, None, None)
def setup_build_environment(self, env):
"""When using %clang, add only its ld.lld-$ver and/or ld.lld to our PATH"""
if self.compiler.name in ['clang', 'apple-clang']:
for lld in 'ld.lld-{0}'.format(self.compiler.version.version[0]), 'ld.lld':
if self.compiler.name in ["clang", "apple-clang"]:
for lld in "ld.lld-{0}".format(self.compiler.version.version[0]), "ld.lld":
bin = os.path.join(os.path.dirname(self.compiler.cc), lld)
sym = os.path.join(self.stage.path, 'ld.lld')
sym = os.path.join(self.stage.path, "ld.lld")
if os.path.exists(bin) and not os.path.exists(sym):
mkdirp(self.stage.path)
os.symlink(bin, sym)
env.prepend_path('PATH', self.stage.path)
env.prepend_path("PATH", self.stage.path)
def setup_run_environment(self, env):
if "+clang" in self.spec:
@@ -531,7 +544,7 @@ class Llvm(CMakePackage, CudaPackage):
define = CMakePackage.define
from_variant = self.define_from_variant
python = spec['python']
python = spec["python"]
cmake_args = [
define("LLVM_REQUIRES_RTTI", True),
define("LLVM_ENABLE_RTTI", True),
@@ -544,14 +557,13 @@ class Llvm(CMakePackage, CudaPackage):
define("LIBOMP_HWLOC_INSTALL_DIR", spec["hwloc"].prefix),
]
version_suffix = spec.variants['version_suffix'].value
if version_suffix != 'none':
cmake_args.append(define('LLVM_VERSION_SUFFIX', version_suffix))
version_suffix = spec.variants["version_suffix"].value
if version_suffix != "none":
cmake_args.append(define("LLVM_VERSION_SUFFIX", version_suffix))
shlib_symbol_version = spec.variants.get('shlib_symbol_version', None)
if shlib_symbol_version is not None and shlib_symbol_version.value != 'none':
cmake_args.append(define('LLVM_SHLIB_SYMBOL_VERSION',
shlib_symbol_version.value))
shlib_symbol_version = spec.variants.get("shlib_symbol_version", None)
if shlib_symbol_version is not None and shlib_symbol_version.value != "none":
cmake_args.append(define("LLVM_SHLIB_SYMBOL_VERSION", shlib_symbol_version.value))
if python.version >= Version("3"):
cmake_args.append(define("Python3_EXECUTABLE", python.command.path))
@@ -562,47 +574,56 @@ class Llvm(CMakePackage, CudaPackage):
runtimes = []
if "+cuda" in spec:
cmake_args.extend([
cmake_args.extend(
[
define("CUDA_TOOLKIT_ROOT_DIR", spec["cuda"].prefix),
define("LIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES",
",".join(spec.variants["cuda_arch"].value)),
define("CLANG_OPENMP_NVPTX_DEFAULT_ARCH",
"sm_{0}".format(spec.variants["cuda_arch"].value[-1])),
])
define(
"LIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES",
",".join(spec.variants["cuda_arch"].value),
),
define(
"CLANG_OPENMP_NVPTX_DEFAULT_ARCH",
"sm_{0}".format(spec.variants["cuda_arch"].value[-1]),
),
]
)
if "+omp_as_runtime" in spec:
cmake_args.extend([
cmake_args.extend(
[
define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
# work around bad libelf detection in libomptarget
define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR",
spec["libelf"].prefix.include),
])
define(
"LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", spec["libelf"].prefix.include
),
]
)
else:
# still build libomptarget but disable cuda
cmake_args.extend([
cmake_args.extend(
[
define("CUDA_TOOLKIT_ROOT_DIR", "IGNORE"),
define("CUDA_SDK_ROOT_DIR", "IGNORE"),
define("CUDA_NVCC_EXECUTABLE", "IGNORE"),
define("LIBOMPTARGET_DEP_CUDA_DRIVER_LIBRARIES", "IGNORE"),
])
]
)
cmake_args.append(from_variant("LIBOMPTARGET_ENABLE_DEBUG", "omp_debug"))
if "+lldb" in spec:
projects.append("lldb")
cmake_args.append(define('LLDB_ENABLE_LIBEDIT', True))
cmake_args.append(define('LLDB_ENABLE_NCURSES', True))
cmake_args.append(define('LLDB_ENABLE_LIBXML2', False))
if spec.version >= Version('10'):
cmake_args.append(from_variant("LLDB_ENABLE_PYTHON", 'python'))
cmake_args.append(define("LLDB_ENABLE_LIBEDIT", True))
cmake_args.append(define("LLDB_ENABLE_NCURSES", True))
cmake_args.append(define("LLDB_ENABLE_LIBXML2", False))
if spec.version >= Version("10"):
cmake_args.append(from_variant("LLDB_ENABLE_PYTHON", "python"))
else:
cmake_args.append(define("LLDB_DISABLE_PYTHON", '~python' in spec))
cmake_args.append(define("LLDB_DISABLE_PYTHON", "~python" in spec))
if spec.satisfies("@5.0.0: +python"):
cmake_args.append(define("LLDB_USE_SYSTEM_SIX", True))
if "+gold" in spec:
cmake_args.append(
define("LLVM_BINUTILS_INCDIR", spec["binutils"].prefix.include)
)
cmake_args.append(define("LLVM_BINUTILS_INCDIR", spec["binutils"].prefix.include))
if "+clang" in spec:
projects.append("clang")
@@ -612,10 +633,10 @@ class Llvm(CMakePackage, CudaPackage):
else:
projects.append("openmp")
if '@8' in spec:
cmake_args.append(from_variant('CLANG_ANALYZER_ENABLE_Z3_SOLVER', 'z3'))
elif '@9:' in spec:
cmake_args.append(from_variant('LLVM_ENABLE_Z3_SOLVER', 'z3'))
if "@8" in spec:
cmake_args.append(from_variant("CLANG_ANALYZER_ENABLE_Z3_SOLVER", "z3"))
elif "@9:" in spec:
cmake_args.append(from_variant("LLVM_ENABLE_Z3_SOLVER", "z3"))
if "+flang" in spec:
projects.append("flang")
@@ -634,7 +655,8 @@ class Llvm(CMakePackage, CudaPackage):
projects.append("polly")
cmake_args.append(define("LINK_POLLY_INTO_TOOLS", True))
cmake_args.extend([
cmake_args.extend(
[
define("BUILD_SHARED_LIBS", False),
from_variant("LLVM_BUILD_LLVM_DYLIB", "llvm_dylib"),
from_variant("LLVM_LINK_LLVM_DYLIB", "link_llvm_dylib"),
@@ -642,18 +664,17 @@ class Llvm(CMakePackage, CudaPackage):
# By default on Linux, libc++.so is an ldscript. CMake fails to add
# CMAKE_INSTALL_RPATH to it, which breaks the build. Statically link libc++abi.a
# into libc++.so so that linking with -lc++ or -stdlib=libc++ is enough.
define('LIBCXX_ENABLE_STATIC_ABI_LIBRARY', True)
])
define("LIBCXX_ENABLE_STATIC_ABI_LIBRARY", True),
]
)
cmake_args.append(define(
"LLVM_TARGETS_TO_BUILD",
get_llvm_targets_to_build(spec)))
cmake_args.append(define("LLVM_TARGETS_TO_BUILD", get_llvm_targets_to_build(spec)))
cmake_args.append(from_variant("LIBOMP_TSAN_SUPPORT", "omp_tsan"))
if self.compiler.name == "gcc":
compiler = Executable(self.compiler.cc)
gcc_output = compiler('-print-search-dirs', output=str, error=str)
gcc_output = compiler("-print-search-dirs", output=str, error=str)
for line in gcc_output.splitlines():
if line.startswith("install:"):
@@ -665,7 +686,7 @@ class Llvm(CMakePackage, CudaPackage):
cmake_args.append(define("GCC_INSTALL_PREFIX", gcc_prefix))
if self.spec.satisfies("~code_signing platform=darwin"):
cmake_args.append(define('LLDB_USE_SYSTEM_DEBUGSERVER', True))
cmake_args.append(define("LLDB_USE_SYSTEM_DEBUGSERVER", True))
# Semicolon-separated list of projects to enable
cmake_args.append(define("LLVM_ENABLE_PROJECTS", projects))
@@ -689,20 +710,24 @@ class Llvm(CMakePackage, CudaPackage):
# rebuild libomptarget to get bytecode runtime library files
with working_dir(ompdir, create=True):
cmake_args = [
'-G', 'Ninja',
define('CMAKE_BUILD_TYPE', spec.variants['build_type'].value),
"-G",
"Ninja",
define("CMAKE_BUILD_TYPE", spec.variants["build_type"].value),
define("CMAKE_C_COMPILER", spec.prefix.bin + "/clang"),
define("CMAKE_CXX_COMPILER", spec.prefix.bin + "/clang++"),
define("CMAKE_INSTALL_PREFIX", spec.prefix),
define('CMAKE_PREFIX_PATH', prefix_paths)
define("CMAKE_PREFIX_PATH", prefix_paths),
]
cmake_args.extend(self.cmake_args())
cmake_args.extend([
cmake_args.extend(
[
define("LIBOMPTARGET_NVPTX_ENABLE_BCLIB", True),
define("LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR",
spec["libelf"].prefix.include),
define(
"LIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR", spec["libelf"].prefix.include
),
self.stage.source_path + "/openmp",
])
]
)
cmake(*cmake_args)
ninja()
@@ -717,22 +742,22 @@ class Llvm(CMakePackage, CudaPackage):
install_tree("bin", join_path(self.prefix, "libexec", "llvm"))
def llvm_config(self, *args, **kwargs):
lc = Executable(self.prefix.bin.join('llvm-config'))
if not kwargs.get('output'):
kwargs['output'] = str
lc = Executable(self.prefix.bin.join("llvm-config"))
if not kwargs.get("output"):
kwargs["output"] = str
ret = lc(*args, **kwargs)
if kwargs.get('result') == "list":
if kwargs.get("result") == "list":
return ret.split()
else:
return ret
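
A usage note on the helper above (paths illustrative): the libs property earlier in this file calls it with result="list", which splits the llvm-config output on whitespace:

# self.llvm_config("--libfiles", "all", result="list")
#   -> ["/prefix/lib/libLLVMCore.a", "/prefix/lib/libLLVMSupport.a", ...]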
def get_llvm_targets_to_build(spec):
targets = spec.variants['targets'].value
targets = spec.variants["targets"].value
# Build everything?
if 'all' in targets:
return 'all'
if "all" in targets:
return "all"
# Convert targets variant values to CMake LLVM_TARGETS_TO_BUILD array.
spack_to_cmake = {
@@ -753,10 +778,10 @@ def get_llvm_targets_to_build(spec):
"systemz": "SystemZ",
"webassembly": "WebAssembly",
"x86": "X86",
"xcore": "XCore"
"xcore": "XCore",
}
if 'none' in targets:
if "none" in targets:
llvm_targets = set()
else:
llvm_targets = set(spack_to_cmake[target] for target in targets)
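
The tail of get_llvm_targets_to_build is cut off by this hunk, but per the targets variant description above it also folds in the host's target family. A rough sketch of the visible mapping logic, with an abridged dictionary (names illustrative):

def llvm_targets_for(targets):
    """Sketch: map Spack 'targets' variant values to LLVM backend names."""
    spack_to_cmake = {"x86": "X86", "nvptx": "NVPTX", "amdgpu": "AMDGPU"}  # abridged
    if "all" in targets:
        return "all"
    chosen = set() if "none" in targets else {spack_to_cmake[t] for t in targets}
    return sorted(chosen)

print(llvm_targets_for(["x86", "nvptx"]))  # ['NVPTX', 'X86']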

var/spack/repos/builtin/packages/py-torch/package.py

@@ -24,125 +24,138 @@ class PyTorch(PythonPackage, CudaPackage):
homepage = "https://pytorch.org/"
git = "https://github.com/pytorch/pytorch.git"
maintainers = ['adamjstewart']
maintainers("adamjstewart")
# Exact set of modules is version- and variant-specific; just attempt to import the
# core libraries to ensure that the package was successfully installed.
import_modules = ['torch', 'torch.autograd', 'torch.nn', 'torch.utils']
import_modules = ["torch", "torch.autograd", "torch.nn", "torch.utils"]
version('master', branch='master', submodules=True)
version('1.10.1', tag='v1.10.1', submodules=True)
version('1.10.0', tag='v1.10.0', submodules=True)
version('1.9.1', tag='v1.9.1', submodules=True)
version('1.9.0', tag='v1.9.0', submodules=True)
version('1.8.2', tag='v1.8.2', submodules=True)
version('1.8.1', tag='v1.8.1', submodules=True)
version('1.8.0', tag='v1.8.0', submodules=True)
version('1.7.1', tag='v1.7.1', submodules=True)
version('1.7.0', tag='v1.7.0', submodules=True)
version('1.6.0', tag='v1.6.0', submodules=True)
version('1.5.1', tag='v1.5.1', submodules=True)
version('1.5.0', tag='v1.5.0', submodules=True)
version('1.4.1', tag='v1.4.1', submodules=True)
version('1.4.0', tag='v1.4.0', submodules=True, deprecated=True,
submodules_delete=['third_party/fbgemm'])
version('1.3.1', tag='v1.3.1', submodules=True)
version('1.3.0', tag='v1.3.0', submodules=True)
version('1.2.0', tag='v1.2.0', submodules=True)
version('1.1.0', tag='v1.1.0', submodules=True)
version('1.0.1', tag='v1.0.1', submodules=True)
version('1.0.0', tag='v1.0.0', submodules=True)
version('0.4.1', tag='v0.4.1', submodules=True, deprecated=True,
submodules_delete=['third_party/nervanagpu'])
version('0.4.0', tag='v0.4.0', submodules=True, deprecated=True)
version('0.3.1', tag='v0.3.1', submodules=True, deprecated=True)
version("master", branch="master", submodules=True)
version("1.10.1", tag="v1.10.1", submodules=True)
version("1.10.0", tag="v1.10.0", submodules=True)
version("1.9.1", tag="v1.9.1", submodules=True)
version("1.9.0", tag="v1.9.0", submodules=True)
version("1.8.2", tag="v1.8.2", submodules=True)
version("1.8.1", tag="v1.8.1", submodules=True)
version("1.8.0", tag="v1.8.0", submodules=True)
version("1.7.1", tag="v1.7.1", submodules=True)
version("1.7.0", tag="v1.7.0", submodules=True)
version("1.6.0", tag="v1.6.0", submodules=True)
version("1.5.1", tag="v1.5.1", submodules=True)
version("1.5.0", tag="v1.5.0", submodules=True)
version("1.4.1", tag="v1.4.1", submodules=True)
version(
"1.4.0",
tag="v1.4.0",
submodules=True,
deprecated=True,
submodules_delete=["third_party/fbgemm"],
)
version("1.3.1", tag="v1.3.1", submodules=True)
version("1.3.0", tag="v1.3.0", submodules=True)
version("1.2.0", tag="v1.2.0", submodules=True)
version("1.1.0", tag="v1.1.0", submodules=True)
version("1.0.1", tag="v1.0.1", submodules=True)
version("1.0.0", tag="v1.0.0", submodules=True)
version(
"0.4.1",
tag="v0.4.1",
submodules=True,
deprecated=True,
submodules_delete=["third_party/nervanagpu"],
)
version("0.4.0", tag="v0.4.0", submodules=True, deprecated=True)
version("0.3.1", tag="v0.3.1", submodules=True, deprecated=True)
is_darwin = sys.platform == 'darwin'
is_darwin = sys.platform == "darwin"
# All options are defined in CMakeLists.txt.
# Some are listed in setup.py, but not all.
variant('caffe2', default=True, description='Build Caffe2')
variant('test', default=False, description='Build C++ test binaries')
variant('cuda', default=not is_darwin, description='Use CUDA')
variant('rocm', default=False, description='Use ROCm')
variant('cudnn', default=not is_darwin, description='Use cuDNN')
variant('fbgemm', default=True, description='Use FBGEMM (quantized 8-bit server operators)')
variant('kineto', default=True, description='Use Kineto profiling library')
variant('magma', default=not is_darwin, description='Use MAGMA')
variant('metal', default=is_darwin, description='Use Metal for Caffe2 iOS build')
variant('nccl', default=not is_darwin, description='Use NCCL')
variant('nnpack', default=True, description='Use NNPACK')
variant('numa', default=not is_darwin, description='Use NUMA')
variant('numpy', default=True, description='Use NumPy')
variant('openmp', default=True, description='Use OpenMP for parallel code')
variant('qnnpack', default=True, description='Use QNNPACK (quantized 8-bit operators)')
variant('valgrind', default=not is_darwin, description='Use Valgrind')
variant('xnnpack', default=True, description='Use XNNPACK')
variant('mkldnn', default=True, description='Use MKLDNN')
variant('distributed', default=not is_darwin, description='Use distributed')
variant('mpi', default=not is_darwin, description='Use MPI for Caffe2')
variant('gloo', default=not is_darwin, description='Use Gloo')
variant('tensorpipe', default=not is_darwin, description='Use TensorPipe')
variant('onnx_ml', default=True, description='Enable traditional ONNX ML API')
variant('breakpad', default=True, description='Enable breakpad crash dump library')
variant("caffe2", default=True, description="Build Caffe2")
variant("test", default=False, description="Build C++ test binaries")
variant("cuda", default=not is_darwin, description="Use CUDA")
variant("rocm", default=False, description="Use ROCm")
variant("cudnn", default=not is_darwin, description="Use cuDNN")
variant("fbgemm", default=True, description="Use FBGEMM (quantized 8-bit server operators)")
variant("kineto", default=True, description="Use Kineto profiling library")
variant("magma", default=not is_darwin, description="Use MAGMA")
variant("metal", default=is_darwin, description="Use Metal for Caffe2 iOS build")
variant("nccl", default=not is_darwin, description="Use NCCL")
variant("nnpack", default=True, description="Use NNPACK")
variant("numa", default=not is_darwin, description="Use NUMA")
variant("numpy", default=True, description="Use NumPy")
variant("openmp", default=True, description="Use OpenMP for parallel code")
variant("qnnpack", default=True, description="Use QNNPACK (quantized 8-bit operators)")
variant("valgrind", default=not is_darwin, description="Use Valgrind")
variant("xnnpack", default=True, description="Use XNNPACK")
variant("mkldnn", default=True, description="Use MKLDNN")
variant("distributed", default=not is_darwin, description="Use distributed")
variant("mpi", default=not is_darwin, description="Use MPI for Caffe2")
variant("gloo", default=not is_darwin, description="Use Gloo")
variant("tensorpipe", default=not is_darwin, description="Use TensorPipe")
variant("onnx_ml", default=True, description="Enable traditional ONNX ML API")
variant("breakpad", default=True, description="Enable breakpad crash dump library")
conflicts('+cuda', when='+rocm')
conflicts('+cudnn', when='~cuda')
conflicts('+magma', when='~cuda')
conflicts('+nccl', when='~cuda~rocm')
conflicts('+nccl', when='platform=darwin')
conflicts('+numa', when='platform=darwin', msg='Only available on Linux')
conflicts('+valgrind', when='platform=darwin', msg='Only available on Linux')
conflicts('+mpi', when='~distributed')
conflicts('+gloo', when='~distributed')
conflicts('+tensorpipe', when='~distributed')
conflicts('+kineto', when='@:1.7')
conflicts('+valgrind', when='@:1.7')
conflicts('~caffe2', when='@0.4.0:1.6') # no way to disable caffe2?
conflicts('+caffe2', when='@:0.3.1') # caffe2 did not yet exist?
conflicts('+tensorpipe', when='@:1.5')
conflicts('+xnnpack', when='@:1.4')
conflicts('~onnx_ml', when='@:1.4') # no way to disable ONNX?
conflicts('+rocm', when='@:0.4')
conflicts('+cudnn', when='@:0.4')
conflicts('+fbgemm', when='@:0.4,1.4.0')
conflicts('+qnnpack', when='@:0.4')
conflicts('+mkldnn', when='@:0.4')
conflicts('+breakpad', when='@:1.9') # Option appeared in 1.10.0
conflicts('+breakpad', when='target=ppc64:', msg='Unsupported')
conflicts('+breakpad', when='target=ppc64le:', msg='Unsupported')
conflicts("+cuda", when="+rocm")
conflicts("+cudnn", when="~cuda")
conflicts("+magma", when="~cuda")
conflicts("+nccl", when="~cuda~rocm")
conflicts("+nccl", when="platform=darwin")
conflicts("+numa", when="platform=darwin", msg="Only available on Linux")
conflicts("+valgrind", when="platform=darwin", msg="Only available on Linux")
conflicts("+mpi", when="~distributed")
conflicts("+gloo", when="~distributed")
conflicts("+tensorpipe", when="~distributed")
conflicts("+kineto", when="@:1.7")
conflicts("+valgrind", when="@:1.7")
conflicts("~caffe2", when="@0.4.0:1.6") # no way to disable caffe2?
conflicts("+caffe2", when="@:0.3.1") # caffe2 did not yet exist?
conflicts("+tensorpipe", when="@:1.5")
conflicts("+xnnpack", when="@:1.4")
conflicts("~onnx_ml", when="@:1.4") # no way to disable ONNX?
conflicts("+rocm", when="@:0.4")
conflicts("+cudnn", when="@:0.4")
conflicts("+fbgemm", when="@:0.4,1.4.0")
conflicts("+qnnpack", when="@:0.4")
conflicts("+mkldnn", when="@:0.4")
conflicts("+breakpad", when="@:1.9") # Option appeared in 1.10.0
conflicts("+breakpad", when="target=ppc64:", msg="Unsupported")
conflicts("+breakpad", when="target=ppc64le:", msg="Unsupported")
conflicts('cuda_arch=none', when='+cuda',
msg='Must specify CUDA compute capabilities of your GPU, see '
'https://developer.nvidia.com/cuda-gpus')
conflicts(
"cuda_arch=none",
when="+cuda",
msg="Must specify CUDA compute capabilities of your GPU, see "
"https://developer.nvidia.com/cuda-gpus",
)
# Required dependencies
depends_on('cmake@3.5:', type='build')
depends_on("cmake@3.5:", type="build")
# Use Ninja generator to speed up build times, automatically used if found
depends_on('ninja@1.5:', when='@1.1.0:', type='build')
depends_on("ninja@1.5:", when="@1.1.0:", type="build")
# See python_min_version in setup.py
depends_on('python@3.6.2:', when='@1.7.1:', type=('build', 'link', 'run'))
depends_on('python@3.6.1:', when='@1.6.0:1.7.0', type=('build', 'link', 'run'))
depends_on('python@3.5:', when='@1.5.0:1.5', type=('build', 'link', 'run'))
depends_on('python@2.7:2.8,3.5:', when='@1.4.0:1.4', type=('build', 'link', 'run'))
depends_on('python@2.7:2.8,3.5:3.7', when='@:1.3', type=('build', 'link', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-future', when='@1.5:', type=('build', 'run'))
depends_on('py-future', when='@1.1: ^python@:2', type=('build', 'run'))
depends_on('py-pyyaml', type=('build', 'run'))
depends_on('py-typing', when='@0.4: ^python@:3.4', type=('build', 'run'))
depends_on('py-typing-extensions', when='@1.7:', type=('build', 'run'))
depends_on('py-pybind11@2.6.2', when='@1.8.0:', type=('build', 'link', 'run'))
depends_on('py-pybind11@2.3.0', when='@1.1.0:1.7', type=('build', 'link', 'run'))
depends_on('py-pybind11@2.2.4', when='@1.0.0:1.0', type=('build', 'link', 'run'))
depends_on('py-pybind11@2.2.2', when='@0.4.0:0.4', type=('build', 'link', 'run'))
depends_on('py-dataclasses', when='@1.7: ^python@3.6.0:3.6', type=('build', 'run'))
depends_on('py-tqdm', type='run')
depends_on('py-protobuf', when='@0.4:', type=('build', 'run'))
depends_on('protobuf', when='@0.4:')
depends_on('blas')
depends_on('lapack')
depends_on('eigen', when='@0.4:')
depends_on("python@3.6.2:", when="@1.7.1:", type=("build", "link", "run"))
depends_on("python@3.6.1:", when="@1.6.0:1.7.0", type=("build", "link", "run"))
depends_on("python@3.5:", when="@1.5.0:1.5", type=("build", "link", "run"))
depends_on("python@2.7:2.8,3.5:", when="@1.4.0:1.4", type=("build", "link", "run"))
depends_on("python@2.7:2.8,3.5:3.7", when="@:1.3", type=("build", "link", "run"))
depends_on("py-setuptools", type=("build", "run"))
depends_on("py-future", when="@1.5:", type=("build", "run"))
depends_on("py-future", when="@1.1: ^python@:2", type=("build", "run"))
depends_on("py-pyyaml", type=("build", "run"))
depends_on("py-typing", when="@0.4: ^python@:3.4", type=("build", "run"))
depends_on("py-typing-extensions", when="@1.7:", type=("build", "run"))
depends_on("py-pybind11@2.6.2", when="@1.8.0:", type=("build", "link", "run"))
depends_on("py-pybind11@2.3.0", when="@1.1.0:1.7", type=("build", "link", "run"))
depends_on("py-pybind11@2.2.4", when="@1.0.0:1.0", type=("build", "link", "run"))
depends_on("py-pybind11@2.2.2", when="@0.4.0:0.4", type=("build", "link", "run"))
depends_on("py-dataclasses", when="@1.7: ^python@3.6.0:3.6", type=("build", "run"))
depends_on("py-tqdm", type="run")
depends_on("py-protobuf", when="@0.4:", type=("build", "run"))
depends_on("protobuf", when="@0.4:")
depends_on("blas")
depends_on("lapack")
depends_on("eigen", when="@0.4:")
# https://github.com/pytorch/pytorch/issues/60329
# depends_on('cpuinfo@2020-12-17', when='@1.8.0:')
# depends_on('cpuinfo@2020-06-11', when='@1.6.0:1.7')
@@ -152,30 +165,30 @@ class PyTorch(PythonPackage, CudaPackage):
# depends_on('sleef@3.4.0_2019-07-30', when='@1.6.0:1.7')
# https://github.com/Maratyszcza/FP16/issues/18
# depends_on('fp16@2020-05-14', when='@1.6.0:')
depends_on('pthreadpool@2021-04-13', when='@1.9.0:')
depends_on('pthreadpool@2020-10-05', when='@1.8.0:1.8')
depends_on('pthreadpool@2020-06-15', when='@1.6.0:1.7')
depends_on('psimd@2020-05-17', when='@1.6.0:')
depends_on('fxdiv@2020-04-17', when='@1.6.0:')
depends_on('benchmark', when='@1.6:+test')
depends_on("pthreadpool@2021-04-13", when="@1.9.0:")
depends_on("pthreadpool@2020-10-05", when="@1.8.0:1.8")
depends_on("pthreadpool@2020-06-15", when="@1.6.0:1.7")
depends_on("psimd@2020-05-17", when="@1.6.0:")
depends_on("fxdiv@2020-04-17", when="@1.6.0:")
depends_on("benchmark", when="@1.6:+test")
# Optional dependencies
depends_on('cuda@7.5:', when='+cuda', type=('build', 'link', 'run'))
depends_on('cuda@9:', when='@1.1:+cuda', type=('build', 'link', 'run'))
depends_on('cuda@9.2:', when='@1.6:+cuda', type=('build', 'link', 'run'))
depends_on('cudnn@6.0:7', when='@:1.0+cudnn')
depends_on('cudnn@7.0:7', when='@1.1.0:1.5+cudnn')
depends_on('cudnn@7.0:', when='@1.6.0:+cudnn')
depends_on('magma', when='+magma')
depends_on('nccl', when='+nccl')
depends_on('numactl', when='+numa')
depends_on('py-numpy', when='+numpy', type=('build', 'run'))
depends_on('llvm-openmp', when='%apple-clang +openmp')
depends_on('valgrind', when='+valgrind')
depends_on("cuda@7.5:", when="+cuda", type=("build", "link", "run"))
depends_on("cuda@9:", when="@1.1:+cuda", type=("build", "link", "run"))
depends_on("cuda@9.2:", when="@1.6:+cuda", type=("build", "link", "run"))
depends_on("cudnn@6.0:7", when="@:1.0+cudnn")
depends_on("cudnn@7.0:7", when="@1.1.0:1.5+cudnn")
depends_on("cudnn@7.0:", when="@1.6.0:+cudnn")
depends_on("magma", when="+magma")
depends_on("nccl", when="+nccl")
depends_on("numactl", when="+numa")
depends_on("py-numpy", when="+numpy", type=("build", "run"))
depends_on("llvm-openmp", when="%apple-clang +openmp")
depends_on("valgrind", when="+valgrind")
# https://github.com/pytorch/pytorch/issues/60332
# depends_on('xnnpack@2021-02-22', when='@1.8.0:+xnnpack')
# depends_on('xnnpack@2020-03-23', when='@1.6.0:1.7+xnnpack')
depends_on('mpi', when='+mpi')
depends_on("mpi", when="+mpi")
# https://github.com/pytorch/pytorch/issues/60270
# depends_on('gloo@2021-05-04', when='@1.9.0:+gloo')
# depends_on('gloo@2020-09-18', when='@1.7.0:1.8+gloo')
@@ -183,31 +196,35 @@ class PyTorch(PythonPackage, CudaPackage):
# https://github.com/pytorch/pytorch/issues/60331
# depends_on('onnx@1.8.0_2020-11-03', when='@1.8.0:+onnx_ml')
# depends_on('onnx@1.7.0_2020-05-31', when='@1.6.0:1.7+onnx_ml')
depends_on('mkl', when='+mkldnn')
depends_on("mkl", when="+mkldnn")
# Test dependencies
depends_on('py-hypothesis', type='test')
depends_on('py-six', type='test')
depends_on('py-psutil', type='test')
depends_on("py-hypothesis", type="test")
depends_on("py-six", type="test")
depends_on("py-psutil", type="test")
# Fix BLAS being overridden by MKL
# https://github.com/pytorch/pytorch/issues/60328
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch',
sha256='e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9',
when='@1.2.0:')
patch(
"https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/59220.patch",
sha256="e37afffe45cf7594c22050109942370e49983ad772d12ebccf508377dc9dcfc9",
when="@1.2.0:",
)
# Fixes build on older systems with glibc <2.12
patch('https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch',
sha256='e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395',
when='@1.1.0:1.8.1')
patch(
"https://patch-diff.githubusercontent.com/raw/pytorch/pytorch/pull/55063.patch",
sha256="e17eaa42f5d7c18bf0d7c37d7b0910127a01ad53fdce3e226a92893356a70395",
when="@1.1.0:1.8.1",
)
# Fixes CMake configuration error when XNNPACK is disabled
# https://github.com/pytorch/pytorch/pull/35607
# https://github.com/pytorch/pytorch/pull/37865
patch('xnnpack.patch', when='@1.5.0:1.5')
patch("xnnpack.patch", when="@1.5.0:1.5")
# Fixes build error when ROCm is enabled for pytorch-1.5 release
patch('rocm.patch', when='@1.5.0:1.5+rocm')
patch("rocm.patch", when="@1.5.0:1.5+rocm")
# Fixes fatal error: sleef.h: No such file or directory
# https://github.com/pytorch/pytorch/pull/35359
@@ -216,47 +233,56 @@ class PyTorch(PythonPackage, CudaPackage):
# Fixes compilation with Clang 9.0.0 and Apple Clang 11.0.3
# https://github.com/pytorch/pytorch/pull/37086
patch('https://github.com/pytorch/pytorch/commit/e921cd222a8fbeabf5a3e74e83e0d8dfb01aa8b5.patch',
sha256='17561b16cd2db22f10c0fe1fdcb428aecb0ac3964ba022a41343a6bb8cba7049',
when='@1.1:1.5')
patch(
"https://github.com/pytorch/pytorch/commit/e921cd222a8fbeabf5a3e74e83e0d8dfb01aa8b5.patch",
sha256="17561b16cd2db22f10c0fe1fdcb428aecb0ac3964ba022a41343a6bb8cba7049",
when="@1.1:1.5",
)
# Removes duplicate definition of getCusparseErrorString
# https://github.com/pytorch/pytorch/issues/32083
patch('cusparseGetErrorString.patch', when='@0.4.1:1.0^cuda@10.1.243:')
patch("cusparseGetErrorString.patch", when="@0.4.1:1.0^cuda@10.1.243:")
# Fixes 'FindOpenMP.cmake'
# to detect openmp settings used by Fujitsu compiler.
patch('detect_omp_of_fujitsu_compiler.patch', when='%fj')
patch("detect_omp_of_fujitsu_compiler.patch", when="%fj")
# Fix compilation of +distributed~tensorpipe
# https://github.com/pytorch/pytorch/issues/68002
patch('https://github.com/pytorch/pytorch/commit/c075f0f633fa0136e68f0a455b5b74d7b500865c.patch',
sha256='e69e41b5c171bfb00d1b5d4ee55dd5e4c8975483230274af4ab461acd37e40b8', when='@1.10.0+distributed~tensorpipe')
patch(
"https://github.com/pytorch/pytorch/commit/c075f0f633fa0136e68f0a455b5b74d7b500865c.patch",
sha256="e69e41b5c171bfb00d1b5d4ee55dd5e4c8975483230274af4ab461acd37e40b8",
when="@1.10.0+distributed~tensorpipe",
)
# Both build and install run cmake/make/make install
# Only run once to speed up build times
phases = ['install']
phases = ["install"]
@property
def libs(self):
root = join_path(self.prefix, self.spec['python'].package.site_packages_dir,
'torch', 'lib')
return find_libraries('libtorch', root)
root = join_path(
self.prefix, self.spec["python"].package.site_packages_dir, "torch", "lib"
)
return find_libraries("libtorch", root)
@property
def headers(self):
root = join_path(self.prefix, self.spec['python'].package.site_packages_dir,
'torch', 'include')
root = join_path(
self.prefix, self.spec["python"].package.site_packages_dir, "torch", "include"
)
headers = find_all_headers(root)
headers.directories = [root]
return headers
@when('@1.5.0:')
@when("@1.5.0:")
def patch(self):
# https://github.com/pytorch/pytorch/issues/52208
filter_file('torch_global_deps PROPERTIES LINKER_LANGUAGE C',
'torch_global_deps PROPERTIES LINKER_LANGUAGE CXX',
'caffe2/CMakeLists.txt')
filter_file(
"torch_global_deps PROPERTIES LINKER_LANGUAGE C",
"torch_global_deps PROPERTIES LINKER_LANGUAGE CXX",
"caffe2/CMakeLists.txt",
)
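For context, filter_file is Spack's in-place search-and-replace helper; a rough plain-Python equivalent (a sketch only, the real helper also supports backups and a literal-string mode):

import re

def filter_file_sketch(pattern, replacement, path):
    # read the file, substitute, and write the result back in place
    with open(path) as f:
        text = f.read()
    with open(path, "w") as f:
        f.write(re.sub(pattern, replacement, text))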
def setup_build_environment(self, env):
"""Set environment variables used to control the build.
@@ -269,7 +295,8 @@ class PyTorch(PythonPackage, CudaPackage):
most flags defined in ``CMakeLists.txt`` can be specified as
environment variables.
"""
def enable_or_disable(variant, keyword='USE', var=None, newer=False):
def enable_or_disable(variant, keyword="USE", var=None, newer=False):
"""Set environment variable to enable or disable support for a
particular variant.
@@ -284,137 +311,135 @@ class PyTorch(PythonPackage, CudaPackage):
# Version 1.1.0 switched from NO_* to USE_* or BUILD_*
# But some newer variants have always used USE_* or BUILD_*
if self.spec.satisfies('@1.1:') or newer:
if '+' + variant in self.spec:
env.set(keyword + '_' + var, 'ON')
if self.spec.satisfies("@1.1:") or newer:
if "+" + variant in self.spec:
env.set(keyword + "_" + var, "ON")
else:
env.set(keyword + '_' + var, 'OFF')
env.set(keyword + "_" + var, "OFF")
else:
if '+' + variant in self.spec:
env.unset('NO_' + var)
if "+" + variant in self.spec:
env.unset("NO_" + var)
else:
env.set('NO_' + var, 'ON')
env.set("NO_" + var, "ON")
# Build in parallel to speed up build times
env.set('MAX_JOBS', make_jobs)
env.set("MAX_JOBS", make_jobs)
# Spack logs have trouble handling colored output
env.set('COLORIZE_OUTPUT', 'OFF')
env.set("COLORIZE_OUTPUT", "OFF")
if self.spec.satisfies('@0.4:'):
enable_or_disable('test', keyword='BUILD')
if self.spec.satisfies("@0.4:"):
enable_or_disable("test", keyword="BUILD")
if self.spec.satisfies('@1.7:'):
enable_or_disable('caffe2', keyword='BUILD')
if self.spec.satisfies("@1.7:"):
enable_or_disable("caffe2", keyword="BUILD")
enable_or_disable('cuda')
if '+cuda' in self.spec:
enable_or_disable("cuda")
if "+cuda" in self.spec:
# cmake/public/cuda.cmake
# cmake/Modules_CUDA_fix/upstream/FindCUDA.cmake
env.unset('CUDA_ROOT')
torch_cuda_arch = ';'.join('{0:.1f}'.format(float(i) / 10.0) for i
in
self.spec.variants['cuda_arch'].value)
env.set('TORCH_CUDA_ARCH_LIST', torch_cuda_arch)
env.unset("CUDA_ROOT")
torch_cuda_arch = ";".join(
"{0:.1f}".format(float(i) / 10.0) for i in self.spec.variants["cuda_arch"].value
)
env.set("TORCH_CUDA_ARCH_LIST", torch_cuda_arch)
enable_or_disable('rocm')
enable_or_disable("rocm")
enable_or_disable('cudnn')
if '+cudnn' in self.spec:
enable_or_disable("cudnn")
if "+cudnn" in self.spec:
# cmake/Modules_CUDA_fix/FindCUDNN.cmake
env.set('CUDNN_INCLUDE_DIR', self.spec['cudnn'].prefix.include)
env.set('CUDNN_LIBRARY', self.spec['cudnn'].libs[0])
env.set("CUDNN_INCLUDE_DIR", self.spec["cudnn"].prefix.include)
env.set("CUDNN_LIBRARY", self.spec["cudnn"].libs[0])
enable_or_disable('fbgemm')
if self.spec.satisfies('@1.8:'):
enable_or_disable('kineto')
enable_or_disable('magma')
enable_or_disable('metal')
if self.spec.satisfies('@1.10:'):
enable_or_disable('breakpad')
enable_or_disable("fbgemm")
if self.spec.satisfies("@1.8:"):
enable_or_disable("kineto")
enable_or_disable("magma")
enable_or_disable("metal")
if self.spec.satisfies("@1.10:"):
enable_or_disable("breakpad")
enable_or_disable('nccl')
if '+nccl' in self.spec:
env.set('NCCL_LIB_DIR', self.spec['nccl'].libs.directories[0])
env.set('NCCL_INCLUDE_DIR', self.spec['nccl'].prefix.include)
enable_or_disable("nccl")
if "+nccl" in self.spec:
env.set("NCCL_LIB_DIR", self.spec["nccl"].libs.directories[0])
env.set("NCCL_INCLUDE_DIR", self.spec["nccl"].prefix.include)
# cmake/External/nnpack.cmake
enable_or_disable('nnpack')
enable_or_disable("nnpack")
enable_or_disable('numa')
if '+numa' in self.spec:
enable_or_disable("numa")
if "+numa" in self.spec:
# cmake/Modules/FindNuma.cmake
env.set('NUMA_ROOT_DIR', self.spec['numactl'].prefix)
env.set("NUMA_ROOT_DIR", self.spec["numactl"].prefix)
# cmake/Modules/FindNumPy.cmake
enable_or_disable('numpy')
enable_or_disable("numpy")
# cmake/Modules/FindOpenMP.cmake
enable_or_disable('openmp', newer=True)
enable_or_disable('qnnpack')
if self.spec.satisfies('@1.3:'):
enable_or_disable('qnnpack', var='PYTORCH_QNNPACK')
if self.spec.satisfies('@1.8:'):
enable_or_disable('valgrind')
if self.spec.satisfies('@1.5:'):
enable_or_disable('xnnpack')
enable_or_disable('mkldnn')
enable_or_disable('distributed')
enable_or_disable('mpi')
enable_or_disable("openmp", newer=True)
enable_or_disable("qnnpack")
if self.spec.satisfies("@1.3:"):
enable_or_disable("qnnpack", var="PYTORCH_QNNPACK")
if self.spec.satisfies("@1.8:"):
enable_or_disable("valgrind")
if self.spec.satisfies("@1.5:"):
enable_or_disable("xnnpack")
enable_or_disable("mkldnn")
enable_or_disable("distributed")
enable_or_disable("mpi")
# cmake/Modules/FindGloo.cmake
enable_or_disable('gloo', newer=True)
if self.spec.satisfies('@1.6:'):
enable_or_disable('tensorpipe')
enable_or_disable("gloo", newer=True)
if self.spec.satisfies("@1.6:"):
enable_or_disable("tensorpipe")
if '+onnx_ml' in self.spec:
env.set('ONNX_ML', 'ON')
if "+onnx_ml" in self.spec:
env.set("ONNX_ML", "ON")
else:
env.set('ONNX_ML', 'OFF')
env.set("ONNX_ML", "OFF")
if not self.spec.satisfies('@master'):
env.set('PYTORCH_BUILD_VERSION', self.version)
env.set('PYTORCH_BUILD_NUMBER', 0)
if not self.spec.satisfies("@master"):
env.set("PYTORCH_BUILD_VERSION", self.version)
env.set("PYTORCH_BUILD_NUMBER", 0)
# BLAS to be used by Caffe2
# Options defined in cmake/Dependencies.cmake and cmake/Modules/FindBLAS.cmake
if self.spec['blas'].name == 'atlas':
env.set('BLAS', 'ATLAS')
env.set('WITH_BLAS', 'atlas')
elif self.spec['blas'].name in ['blis', 'amdblis']:
env.set('BLAS', 'BLIS')
env.set('WITH_BLAS', 'blis')
elif self.spec['blas'].name == 'eigen':
env.set('BLAS', 'Eigen')
elif self.spec['lapack'].name in ['libflame', 'amdlibflame']:
env.set('BLAS', 'FLAME')
env.set('WITH_BLAS', 'FLAME')
elif self.spec['blas'].name in [
'intel-mkl', 'intel-parallel-studio', 'intel-oneapi-mkl']:
env.set('BLAS', 'MKL')
env.set('WITH_BLAS', 'mkl')
elif self.spec['blas'].name == 'openblas':
env.set('BLAS', 'OpenBLAS')
env.set('WITH_BLAS', 'open')
elif self.spec['blas'].name == 'veclibfort':
env.set('BLAS', 'vecLib')
env.set('WITH_BLAS', 'veclib')
if self.spec["blas"].name == "atlas":
env.set("BLAS", "ATLAS")
env.set("WITH_BLAS", "atlas")
elif self.spec["blas"].name in ["blis", "amdblis"]:
env.set("BLAS", "BLIS")
env.set("WITH_BLAS", "blis")
elif self.spec["blas"].name == "eigen":
env.set("BLAS", "Eigen")
elif self.spec["lapack"].name in ["libflame", "amdlibflame"]:
env.set("BLAS", "FLAME")
env.set("WITH_BLAS", "FLAME")
elif self.spec["blas"].name in ["intel-mkl", "intel-parallel-studio", "intel-oneapi-mkl"]:
env.set("BLAS", "MKL")
env.set("WITH_BLAS", "mkl")
elif self.spec["blas"].name == "openblas":
env.set("BLAS", "OpenBLAS")
env.set("WITH_BLAS", "open")
elif self.spec["blas"].name == "veclibfort":
env.set("BLAS", "vecLib")
env.set("WITH_BLAS", "veclib")
else:
env.set('BLAS', 'Generic')
env.set('WITH_BLAS', 'generic')
env.set("BLAS", "Generic")
env.set("WITH_BLAS", "generic")
# Don't use vendored third-party libraries when possible
env.set('BUILD_CUSTOM_PROTOBUF', 'OFF')
env.set('USE_SYSTEM_NCCL', 'ON')
env.set('USE_SYSTEM_EIGEN_INSTALL', 'ON')
if self.spec.satisfies('@0.4:'):
env.set('pybind11_DIR', self.spec['py-pybind11'].prefix)
env.set('pybind11_INCLUDE_DIR',
self.spec['py-pybind11'].prefix.include)
if self.spec.satisfies('@1.10:'):
env.set('USE_SYSTEM_PYBIND11', 'ON')
env.set("BUILD_CUSTOM_PROTOBUF", "OFF")
env.set("USE_SYSTEM_NCCL", "ON")
env.set("USE_SYSTEM_EIGEN_INSTALL", "ON")
if self.spec.satisfies("@0.4:"):
env.set("pybind11_DIR", self.spec["py-pybind11"].prefix)
env.set("pybind11_INCLUDE_DIR", self.spec["py-pybind11"].prefix.include)
if self.spec.satisfies("@1.10:"):
env.set("USE_SYSTEM_PYBIND11", "ON")
# https://github.com/pytorch/pytorch/issues/60334
# if self.spec.satisfies('@1.8:'):
# env.set('USE_SYSTEM_SLEEF', 'ON')
if self.spec.satisfies('@1.6:'):
if self.spec.satisfies("@1.6:"):
# env.set('USE_SYSTEM_LIBS', 'ON')
# https://github.com/pytorch/pytorch/issues/60329
# env.set('USE_SYSTEM_CPUINFO', 'ON')
@@ -422,27 +447,26 @@ class PyTorch(PythonPackage, CudaPackage):
# env.set('USE_SYSTEM_GLOO', 'ON')
# https://github.com/Maratyszcza/FP16/issues/18
# env.set('USE_SYSTEM_FP16', 'ON')
env.set('USE_SYSTEM_PTHREADPOOL', 'ON')
env.set('USE_SYSTEM_PSIMD', 'ON')
env.set('USE_SYSTEM_FXDIV', 'ON')
env.set('USE_SYSTEM_BENCHMARK', 'ON')
env.set("USE_SYSTEM_PTHREADPOOL", "ON")
env.set("USE_SYSTEM_PSIMD", "ON")
env.set("USE_SYSTEM_FXDIV", "ON")
env.set("USE_SYSTEM_BENCHMARK", "ON")
# https://github.com/pytorch/pytorch/issues/60331
# env.set('USE_SYSTEM_ONNX', 'ON')
# https://github.com/pytorch/pytorch/issues/60332
# env.set('USE_SYSTEM_XNNPACK', 'ON')
@run_before('install')
@run_before("install")
def build_amd(self):
if '+rocm' in self.spec:
python(os.path.join('tools', 'amd_build', 'build_amd.py'))
if "+rocm" in self.spec:
python(os.path.join("tools", "amd_build", "build_amd.py"))
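Here python is not the interpreter builtin but Spack's Executable wrapper around the build-time Python; a rough stand-in (a sketch, not Spack's actual class):

import subprocess
import sys

def python(*args):
    # minimal stand-in: run the interpreter on the given script and arguments
    subprocess.run([sys.executable, *args], check=True)

# usage: python(os.path.join("tools", "amd_build", "build_amd.py"))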
@run_after('install')
@run_after("install")
@on_package_attributes(run_tests=True)
def install_test(self):
with working_dir('test'):
python('run_test.py')
with working_dir("test"):
python("run_test.py")
# Tests need to be re-added since `phases` was overridden
run_after('install')(
PythonPackage._run_default_install_time_test_callbacks)
run_after('install')(PythonPackage.sanity_check_prefix)
run_after("install")(PythonPackage._run_default_install_time_test_callbacks)
run_after("install")(PythonPackage.sanity_check_prefix)

File diff suppressed because it is too large


@@ -30,7 +30,7 @@ class Armcomputelibrary(SConsPackage):
url = "https://github.com/ARM-software/ComputeLibrary/archive/refs/tags/v23.02.zip"
git = "https://github.com/ARM-software/ComputeLibrary.git"
maintainers = ["annop-w"]
maintainers("annop-w")
version("23.02", sha256="bed1b24047ce00155e552204bc3983e86f46775414c554a34a7ece931d67ec62")
version("22.11", sha256="2f70f54d84390625222503ea38650c00c49d4b70bc86a6b9aeeebee9d243865f")


@@ -25,7 +25,7 @@ class Babelstream(CMakePackage, CudaPackage, ROCmPackage):
version("4.0", sha256="a9cd39277fb15d977d468435eb9b894f79f468233f0131509aa540ffda4f5953")
version("main", branch="main")
version("develop", branch="develop")
maintainers = ["tomdeakin", "kaanolgu" "tom91136", "robj0nes"]
maintainers("tomdeakin", "kaanolgu" "tom91136", "robj0nes")
# Languages
# Also supported variants are cuda and rocm (for HIP)


@@ -14,7 +14,7 @@ class CRaft(AutotoolsPackage):
git = "https://github.com/canonical/raft.git"
url = "https://github.com/canonical/raft/archive/refs/tags/v0.17.1.tar.gz"
maintainers = ["mdorier"]
maintainers("mdorier")
version("master", branch="master")
version("0.17.1", sha256="e31c7fafbdd5f94913161c5d64341a203364e512524b47295c97a91e83c4198b")


@@ -17,7 +17,7 @@ class CbtfArgonavisGui(QMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis-gui.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("1.3.0.0", branch="1.3.0.0")


@@ -15,7 +15,7 @@ class CbtfArgonavis(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1")


@@ -18,7 +18,7 @@ class CbtfKrell(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-krell.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1")


@@ -13,7 +13,7 @@ class CbtfLanl(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-lanl.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1")


@@ -18,7 +18,7 @@ class Cbtf(CMakePackage):
homepage = "https://sourceforge.net/p/cbtf/wiki/Home"
git = "https://github.com/OpenSpeedShop/cbtf.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("1.9.4.1", branch="1.9.4.1")


@@ -17,7 +17,7 @@ class DoubleBatchedFftLibrary(CMakePackage):
url = "https://github.com/intel/double-batched-fft-library/archive/refs/tags/v0.3.6.tar.gz"
git = "https://github.com/intel/double-batched-fft-library.git"
maintainers = ["uphoffc"]
maintainers("uphoffc")
version("main", branch="main")
version("develop", branch="develop")


@@ -16,7 +16,7 @@ class Dpmjet(MakefilePackage):
list_url = "https://github.com/DPMJET/DPMJET/tags"
git = "https://github.com/DPMJET/DPMJET.git"
maintainers = ["wdconinc"]
maintainers("wdconinc")
version("19.3.5", sha256="5a546ca20f86abaecda1828eb5b577aee8a532dffb2c5e7244667d5f25777909")
version("19.3.4", sha256="646f520aa67ef6355c45cde155a5dd55f7c9d661314358a7668f6ff472f5d5f9")


@@ -16,7 +16,7 @@ class Estarlight(CMakePackage):
list_url = "https://github.com/eic/estarlight/tags"
git = "https://github.com/eic/estarlight.git"
maintainers = ["wdconinc"]
maintainers("wdconinc")
version("master", branch="master")
version("1.0.1", sha256="b43c1dd3663d8f325f30b17dd7cf4b49f2eb8ceeed7319c5aabebec8676279fd")


@@ -12,7 +12,7 @@ class Getorganelle(PythonPackage):
homepage = "https://github.com/Kinggerm/GetOrganelle"
url = "https://github.com/Kinggerm/GetOrganelle/archive/refs/tags/1.7.5.0.tar.gz"
maintainers = ["snehring"]
maintainers("snehring")
version("1.7.7.0", sha256="dd351b5cd33688adfcd8bff9794ae0cc0ce01a572dac2bcf6c9d7db77b3e4883")
version("1.7.5.0", sha256="c498196737726cb4c0158f23037bf301a069f5028ece729bb4d09c7d915df93d")


@@ -12,7 +12,7 @@ class GsiNcdiag(CMakePackage):
homepage = "https://github.com/NOAA-EMC/GSI-ncdiag"
url = "https://github.com/NOAA-EMC/GSI-ncdiag/archive/refs/tags/v1.0.0.tar.gz"
maintainers = ["ulmononian"]
maintainers("ulmononian")
version("1.0.0", sha256="7251d6139c2bc1580db5f7f019e10a4c73d188ddd52ccf21ecc9e39d50a6af51")


@@ -14,7 +14,7 @@ class Lcov(MakefilePackage):
homepage = "http://ltp.sourceforge.net/coverage/lcov.php"
url = "https://github.com/linux-test-project/lcov/releases/download/v1.14/lcov-1.14.tar.gz"
maintainers = ["KineticTheory"]
maintainers("KineticTheory")
version("1.16", sha256="987031ad5528c8a746d4b52b380bc1bffe412de1f2b9c2ba5224995668e3240b")
version("1.15", sha256="c1cda2fa33bec9aa2c2c73c87226cfe97de0831887176b45ee523c5e30f8053a")


@@ -13,7 +13,7 @@ class Netcdf95(CMakePackage):
homepage = "https://lguez.github.io/NetCDF95/"
git = "https://github.com/lguez/NetCDF95.git"
maintainers = ["RemiLacroix-IDRIS"]
maintainers("RemiLacroix-IDRIS")
version("0.3", tag="v0.3", submodules=True)


@@ -31,7 +31,7 @@ class OpenspeedshopUtils(CMakePackage):
homepage = "http://www.openspeedshop.org"
git = "https://github.com/OpenSpeedShop/openspeedshop.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("2.4.2.1", branch="2.4.2.1")


@@ -26,7 +26,7 @@ class Openspeedshop(CMakePackage):
homepage = "http://www.openspeedshop.org"
git = "https://github.com/OpenSpeedShop/openspeedshop.git"
maintainers = ["jgalarowicz"]
maintainers("jgalarowicz")
version("develop", branch="master")
version("2.4.2.1", branch="2.4.2.1")


@@ -12,7 +12,7 @@ class PyAmityping(PythonPackage):
homepage = "https://github.com/slac-lcls/amityping"
url = "https://github.com/slac-lcls/amityping/archive/refs/tags/1.1.12.tar.gz"
maintainers = ["valmar"]
maintainers("valmar")
version("1.1.12", sha256="e00e7102a53fa6ee343f018669f6b811d703a2da4728b497f80579bf89efbd3c")


@@ -11,7 +11,7 @@ class PyLclsKrtc(PythonPackage):
pypi = "lcls-krtc/lcls-krtc-0.2.0.tar.gz"
maintainers = ["valmar"]
maintainers("valmar")
version("0.2.0", sha256="20e6327d488d23e29135be44504bf7df72e4425a518f4222841efcd2cd2985f9")


@@ -13,7 +13,7 @@ class PyMinkowskiengine(PythonPackage, CudaPackage):
homepage = "https://nvidia.github.io/MinkowskiEngine/"
pypi = "MinkowskiEngine/MinkowskiEngine-0.5.4.tar.gz"
maintainers = ["wdconinc"]
maintainers("wdconinc")
version("0.5.4", sha256="b1879c00d0b0b1d30ba622cce239886a7e3c78ee9da1064cdfe2f64c2ab15f94")


@@ -12,7 +12,7 @@ class PyPyabel(PythonPackage):
homepage = "https://github.com/PyAbel/PyAbel"
pypi = "PyAbel/PyAbel-0.9.0.tar.gz"
maintainers = ["valmar"]
maintainers("valmar")
version("0.9.0", sha256="4052143de9da19be13bb321fb0524090ffc8cdc56e0e990e5d6f557f18109f08")


@@ -16,7 +16,7 @@ class Rocmlir(CMakePackage):
git = "https://github.com/ROCmSoftwarePlatform/rocMLIR.git"
url = "https://github.com/ROCmSoftwarePlatform/rocMLIR/archive/refs/tags/rocm-5.4.3.tar.gz"
maintainers = ["srekolam"]
maintainers("srekolam")
version("5.4.3", sha256="c0ba0f565e1c6614c9e6091a24cbef67b734a29e4a4ed7a8a57dc43f58ed8d53")
version("5.4.0", sha256="3823f455ee392118c3281e27d45fa0e5381f3c4070eb4e06ba13bc6b34a90a60")


@@ -14,7 +14,7 @@ class Scorep(AutotoolsPackage):
homepage = "https://www.vi-hps.org/projects/score-p"
url = "https://perftools.pages.jsc.fz-juelich.de/cicd/scorep/tags/scorep-7.1/scorep-7.1.tar.gz"
maintainers = ["wrwilliams"]
maintainers("wrwilliams")
version("8.0", sha256="4c0f34f20999f92ebe6ca1ff706d0846b8ce6cd537ffbedb49dfaef0faa66311")
version("7.1", sha256="98dea497982001fb82da3429ca55669b2917a0858c71abe2cfe7cd113381f1f7")


@@ -21,7 +21,7 @@ class Tandem(CMakePackage):
version("1.0", tag="v1.0", submodules=True)
patch("fix_v1.0_compilation.diff", when="@1.0")
maintainers = ["dmay23", "Thomas-Ulrich"]
maintainers("dmay23", "Thomas-Ulrich")
variant("polynomial_degree", default="2")
variant("domain_dimension", default="2", values=("2", "3"), multi=False)
variant("min_quadrature_order", default="0")


@@ -22,7 +22,7 @@ class Thrift(Package):
list_url = "http://archive.apache.org/dist/thrift/"
list_depth = 1
maintainers = ["thomas-bouvier"]
maintainers("thomas-bouvier")
version("0.18.1", sha256="04c6f10e5d788ca78e13ee2ef0d2152c7b070c0af55483d6b942e29cff296726")
version("0.17.0", sha256="b272c1788bb165d99521a2599b31b97fa69e5931d099015d91ae107a0b0cc58f")


@@ -12,7 +12,7 @@ class Xtcdata(CMakePackage):
homepage = "https://github.com/slac-lcls/lcls2"
url = "https://github.com/slac-lcls/lcls2/archive/refs/tags/3.3.37.tar.gz"
maintainers = ["valmar"]
maintainers("valmar")
version("3.3.37", sha256="127a5ae44c9272039708bd877849a3af354ce881fde093a2fc6fe0550b698b72")