release workflow: Add build scripts for jobs and means to upload pkgs
commit 5600c9f0d9
parent a6e8e889b2
3 changed files with 541 additions and 0 deletions
bin/rebuild-index.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash

# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

set -x

SPACK_BIN_DIR="${CI_PROJECT_DIR}/bin"
export PATH="${SPACK_BIN_DIR}:${PATH}"

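# Rebuild and upload the buildcache index; this dispatches to update_index()
# in lib/spack/spack/cmd/upload_s3.py (added in this commit).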
spack upload-s3 index

bin/rebuild-package.sh (new executable file, 316 lines)
@@ -0,0 +1,316 @@
#!/bin/bash

# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

###
### This script represents a gitlab-ci job, corresponding to a single release
### spec.  As such, this script must first decide whether or not the spec it
### has been assigned is up to date on the remote binary mirror.  If it is
### not (i.e. the source code has changed in a way that caused a change in the
### full_hash of the spec), this script will build the package, create a
### binary cache for it, and then push all related files to the remote binary
### mirror.  This script also communicates with a remote CDash instance to
### share status on the package build process.
###
### The following environment variables are expected to be set in order for
### the various elements in this script to function properly.  Listed first
### are two defaults we rely on from gitlab, then three we set up in the
### variables section of gitlab ourselves, and finally four variables
### written into the .gitlab-ci.yml file.
###
### CI_PROJECT_DIR
### CI_JOB_NAME
###
### AWS_ACCESS_KEY_ID
### AWS_SECRET_ACCESS_KEY
### SPACK_SIGNING_KEY
###
### CDASH_BASE_URL
### ROOT_SPEC
### DEPENDENCIES
### MIRROR_URL
###
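### Example values for these variables (illustrative only; the real values
### are generated when the release jobs are created):
###
###   CI_JOB_NAME="readline 7.0 gcc@5.5.0 linux-ubuntu18.04-x86_64"
###   ROOT_SPEC="readline@7.0%gcc@5.5.0 arch=linux-ubuntu18.04-x86_64"
###   DEPENDENCIES="ncurses 6.1 gcc@5.5.0 linux-ubuntu18.04-x86_64;pkgconf 1.5.4 gcc@5.5.0 linux-ubuntu18.04-x86_64"
###   MIRROR_URL="https://mirror.example.com/release-mirror"
###   CDASH_BASE_URL="https://cdash.example.com"
###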

shopt -s expand_aliases

export FORCE_UNSAFE_CONFIGURE=1

TEMP_DIR="${CI_PROJECT_DIR}/jobs_scratch_dir"

JOB_LOG_DIR="${TEMP_DIR}/logs"
SPEC_DIR="${TEMP_DIR}/specs"
LOCAL_MIRROR="${CI_PROJECT_DIR}/local_mirror"
BUILD_CACHE_DIR="${LOCAL_MIRROR}/build_cache"
SPACK_BIN_DIR="${CI_PROJECT_DIR}/bin"
CDASH_UPLOAD_URL="${CDASH_BASE_URL}/submit.php?project=Spack"
DEP_JOB_RELATEBUILDS_URL="${CDASH_BASE_URL}/api/v1/relateBuilds.php"
declare -a JOB_DEPS_PKG_NAMES

export SPACK_ROOT=${CI_PROJECT_DIR}
export PATH="${SPACK_BIN_DIR}:${PATH}"
export GNUPGHOME="${CI_PROJECT_DIR}/opt/spack/gpg"

mkdir -p ${JOB_LOG_DIR}
mkdir -p ${SPEC_DIR}

cleanup() {
    set +x

    if [ -z "$exit_code" ] ; then

        exit_code=$1
        if [ -z "$exit_code" ] ; then
            exit_code=0
        fi

        restore_io

        if [ "$( type -t finalize )" '=' 'function' ] ; then
            finalize "$JOB_LOG_DIR/cdash_log.txt"
        fi

        # We can clean these out later on, once we have a good sense for
        # how the logging infrastructure is working
        # rm -rf "$JOB_LOG_DIR"
    fi

    \exit $exit_code
}

alias exit='cleanup'
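
# Note: with "shopt -s expand_aliases" in effect, every plain "exit" in the
# rest of this script is rewritten to call cleanup(), while the escaped
# "\exit" inside cleanup() bypasses the alias and really terminates the shell.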

begin_logging() {
    trap "cleanup 1; \\exit \$exit_code" INT TERM QUIT
    trap "cleanup 0; \\exit \$exit_code" EXIT

    rm -rf "$JOB_LOG_DIR/cdash_log.txt"

    # NOTE: Here, some redirects are set up
    exec 3>&1 # fd 3 is now a dup of stdout
    exec 4>&2 # fd 4 is now a dup of stderr

    # stdout and stderr are joined and redirected to the log
    exec &> "$JOB_LOG_DIR/cdash_log.txt"

    set -x
}
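
# restore_io undoes the redirections made in begin_logging: it closes the
# current stdout/stderr (pointed at the log), reattaches them to the saved
# duplicates on fds 3 and 4, and then closes those saved copies.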
restore_io() {
    exec >&-
    exec 2>&-

    exec >&3
    exec 2>&4

    exec 3>&-
    exec 4>&-
}

finalize() {
    # If you define a finalize function:
    #  - it will always be called at the very end of the script
    #  - the log file will be passed in as the first argument, and
    #  - the code in this function will not be logged.
    echo "The full log file is located at $1"
    # TODO: send this log data to cdash!
}

check_error()
{
    local last_exit_code=$1
    local last_cmd=$2
    if [[ ${last_exit_code} -ne 0 ]]; then
        echo "${last_cmd} exited with code ${last_exit_code}"
        echo "TERMINATING JOB"
        exit 1
    else
        echo "${last_cmd} completed successfully"
    fi
}

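# extract_build_id scans command output for a CDash build URL; illustrative
# input: a line containing ".../buildSummary.php?buildid=12345", from which
# it extracts and echoes "12345" (or echoes "NONE" if nothing matches).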
extract_build_id()
{
    LINES_TO_SEARCH=$1
    regex="buildSummary\.php\?buildid=([[:digit:]]+)"
    SINGLE_LINE_OUTPUT=$(echo ${LINES_TO_SEARCH} | tr -d '\n')

    if [[ ${SINGLE_LINE_OUTPUT} =~ ${regex} ]]; then
        echo "${BASH_REMATCH[1]}"
    else
        echo "NONE"
    fi
}

get_relate_builds_post_data()
{
  cat <<EOF
{
    "project": "${1}",
    "buildid": ${2},
    "relatedid": ${3},
    "relationship": "depends on"
}
EOF
}

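# gen_full_specs_for_job_and_deps splits CI_JOB_NAME on whitespace into
# package name, version, compiler, and os/arch, splits DEPENDENCIES on ";"
# into entries of the same shape (see the example values above), and then has
# spack write the spec yaml files for this job and its dependencies.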
gen_full_specs_for_job_and_deps() {

    read -ra PARTSARRAY <<< "${CI_JOB_NAME}"
    local pkgName="${PARTSARRAY[0]}"
    local pkgVersion="${PARTSARRAY[1]}"
    local compiler="${PARTSARRAY[2]}"
    local osarch="${PARTSARRAY[3]}"

    JOB_SPEC_NAME="${pkgName}@${pkgVersion}%${compiler} arch=${osarch}"
    JOB_PKG_NAME="${pkgName}"
    SPEC_YAML_PATH="${SPEC_DIR}/${pkgName}.yaml"
    local root_spec_name="${ROOT_SPEC}"
    local spec_names_to_save="${pkgName}"

    IFS=';' read -ra DEPS <<< "${DEPENDENCIES}"
    for i in "${DEPS[@]}"; do
        read -ra PARTSARRAY <<< "${i}"
        pkgName="${PARTSARRAY[0]}"
        spec_names_to_save="${spec_names_to_save} ${pkgName}"
        JOB_DEPS_PKG_NAMES+=("${pkgName}")
    done

    spack -d buildcache save-yaml --specs "${spec_names_to_save}" --root-spec "${root_spec_name}" --yaml-dir "${SPEC_DIR}"
}

begin_logging

gen_full_specs_for_job_and_deps

echo "Building package ${JOB_SPEC_NAME}, ${HASH}, ${MIRROR_URL}"

# List the compilers spack knows about
echo "Compiler Configurations:"
spack config get compilers

# Make the build_cache directory if it doesn't exist
mkdir -p "${BUILD_CACHE_DIR}"

# Get buildcache name so we can write a CDash build id file in the right place.
# If we're unable to get the buildcache name, we may have encountered a problem
# concretizing the spec, or some other issue that will eventually cause the job
# to fail.
JOB_BUILD_CACHE_ENTRY_NAME=`spack -d buildcache get-buildcache-name --spec-yaml "${SPEC_YAML_PATH}"`
if [[ $? -ne 0 ]]; then
    echo "ERROR, unable to get buildcache entry name for job ${CI_JOB_NAME} (spec: ${JOB_SPEC_NAME})"
    exit 1
fi

# This should create the directory we referred to as GNUPGHOME earlier
spack gpg list

# Importing the secret key using gpg2 directly should allow us to both
# sign and verify
set +x
KEY_IMPORT_RESULT=`echo ${SPACK_SIGNING_KEY} | base64 --decode | gpg2 --import`
check_error $? "gpg2 --import"
set -x

spack gpg list --trusted
spack gpg list --signing

# Whether we have to build the spec or download it pre-built, we expect to find
# the cdash build id file sitting in this location afterwards.
JOB_CDASH_ID_FILE="${BUILD_CACHE_DIR}/${JOB_BUILD_CACHE_ENTRY_NAME}.cdashid"

# Finally, we can check the spec we have been tasked with building against
# the built binary on the remote mirror to see if it needs to be rebuilt
spack -d buildcache check --spec-yaml "${SPEC_YAML_PATH}" --mirror-url "${MIRROR_URL}" --rebuild-on-error
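
# A nonzero exit status from the check above means the binary needs to be
# (re)built (or, with --rebuild-on-error, that the check itself failed);
# zero means the mirror copy is current and we just download it.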
if [[ $? -ne 0 ]]; then
    # Configure mirror
    spack mirror add local_artifact_mirror "file://${LOCAL_MIRROR}"

    JOB_CDASH_ID="NONE"

    # Install package, using the buildcache from the local mirror to
    # satisfy dependencies.
    BUILD_ID_LINE=`spack -d -k -v install --use-cache --cdash-upload-url "${CDASH_UPLOAD_URL}" --cdash-build "${JOB_SPEC_NAME}" --cdash-site "Spack AWS Gitlab Instance" --cdash-track "Experimental" -f "${SPEC_YAML_PATH}" | grep "buildSummary\\.php"`
    check_error $? "spack install"

    # By parsing the output of the "spack install" command, we can get the
    # buildid generated for us by CDash
    JOB_CDASH_ID=$(extract_build_id "${BUILD_ID_LINE}")

    # Create buildcache entry for this package, reading the spec from the yaml
    # file.
    spack -d buildcache create --spec-yaml "${SPEC_YAML_PATH}" -a -f -d "${LOCAL_MIRROR}" --no-rebuild-index
    check_error $? "spack buildcache create"

    # Write the .cdashid file to the buildcache as well
    echo "${JOB_CDASH_ID}" >> ${JOB_CDASH_ID_FILE}

    # TODO: The upload-s3 command should eventually be replaced with something
    # like: "spack buildcache put <mirror> <spec>", when that subcommand is
    # properly implemented.
    spack -d upload-s3 spec --base-dir "${LOCAL_MIRROR}" --spec-yaml "${SPEC_YAML_PATH}"
    check_error $? "spack upload-s3 spec"
else
    echo "spec ${JOB_SPEC_NAME} is already up to date on remote mirror, downloading it"

    # Configure remote mirror so we can download buildcache entry
    spack mirror add remote_binary_mirror ${MIRROR_URL}

    # Now download it
    spack -d buildcache download --spec-yaml "${SPEC_YAML_PATH}" --path "${BUILD_CACHE_DIR}/" --require-cdashid
    check_error $? "spack buildcache download"
fi

# The next step is to relate this job to the jobs it depends on
if [ -f "${JOB_CDASH_ID_FILE}" ]; then
    JOB_CDASH_BUILD_ID=$(<${JOB_CDASH_ID_FILE})

    if [ "${JOB_CDASH_BUILD_ID}" == "NONE" ]; then
        echo "ERROR: unable to read this job's id from ${JOB_CDASH_ID_FILE}"
        exit 1
    fi

    # Now get CDash ids for dependencies and "relate" each dependency build
    # with this job's build
    for DEP_PKG_NAME in "${JOB_DEPS_PKG_NAMES[@]}"; do
        echo "Getting cdash id for dependency --> ${DEP_PKG_NAME} <--"
        DEP_SPEC_YAML_PATH="${SPEC_DIR}/${DEP_PKG_NAME}.yaml"
        DEP_JOB_BUILDCACHE_NAME=`spack -d buildcache get-buildcache-name --spec-yaml "${DEP_SPEC_YAML_PATH}"`

        if [[ $? -eq 0 ]]; then
            DEP_JOB_ID_FILE="${BUILD_CACHE_DIR}/${DEP_JOB_BUILDCACHE_NAME}.cdashid"
            echo "DEP_JOB_ID_FILE path = ${DEP_JOB_ID_FILE}"

            if [ -f "${DEP_JOB_ID_FILE}" ]; then
                DEP_JOB_CDASH_BUILD_ID=$(<${DEP_JOB_ID_FILE})
                echo "File ${DEP_JOB_ID_FILE} contained value ${DEP_JOB_CDASH_BUILD_ID}"
                echo "Relating builds -> ${JOB_SPEC_NAME} (buildid=${JOB_CDASH_BUILD_ID}) depends on ${DEP_PKG_NAME} (buildid=${DEP_JOB_CDASH_BUILD_ID})"
                relateBuildsPostBody="$(get_relate_builds_post_data "Spack" ${JOB_CDASH_BUILD_ID} ${DEP_JOB_CDASH_BUILD_ID})"
                relateBuildsResult=`curl "${DEP_JOB_RELATEBUILDS_URL}" -H "Content-Type: application/json" -H "Accept: application/json" -d "${relateBuildsPostBody}"`
                echo "Result of curl request: ${relateBuildsResult}"
            else
                echo "ERROR: Did not find expected .cdashid file for dependency: ${DEP_JOB_ID_FILE}"
                exit 1
            fi
        else
            echo "ERROR: Unable to get buildcache entry name for ${DEP_PKG_NAME}"
            exit 1
        fi
    done
else
    echo "ERROR: Did not find expected .cdashid file ${JOB_CDASH_ID_FILE}"
    exit 1
fi

# Show the size of the buildcache and a list of what's in it, directly
# in the gitlab log output
(
    restore_io
    du -sh ${BUILD_CACHE_DIR}
    find ${BUILD_CACHE_DIR} -maxdepth 3 -type d -ls
)

echo "End of rebuild package script"

lib/spack/spack/cmd/upload_s3.py (new file, 212 lines)
@@ -0,0 +1,212 @@
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

# TODO: This will be merged into the buildcache command once
# everything is working.

import os
import re
import sys

try:
    import boto3
    import botocore
    have_boto3_support = True
except ImportError:
    have_boto3_support = False

import llnl.util.tty as tty

from spack.error import SpackError
import spack.tengine as template_engine
from spack.spec import Spec


import spack.binary_distribution as bindist


description = "temporary command to upload buildcaches to 's3.spack.io'"
section = "packaging"
level = "long"
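
# Example invocations, as used by the CI scripts above (illustrative):
#
#     spack upload-s3 spec --base-dir /path/to/local_mirror --spec-yaml specs/pkg.yaml
#     spack upload-s3 index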


def setup_parser(subparser):
    setup_parser.parser = subparser
    subparsers = subparser.add_subparsers(help='upload-s3 sub-commands')

    # sub-command to upload a built spec to s3
    spec = subparsers.add_parser('spec', help=upload_spec.__doc__)

    spec.add_argument('-s', '--spec', default=None,
                      help='Spec to upload')

    spec.add_argument('-y', '--spec-yaml', default=None,
                      help='Path to spec yaml file containing spec to upload')

    spec.add_argument('-b', '--base-dir', default=None,
                      help='Path to root of buildcaches')

    spec.add_argument('-e', '--endpoint-url',
                      default='https://s3.spack.io', help='URL of mirror')

    spec.set_defaults(func=upload_spec)

    # sub-command to update the index of a buildcache on s3
    index = subparsers.add_parser('index', help=update_index.__doc__)

    index.add_argument('-e', '--endpoint-url',
                       default='https://s3.spack.io', help='URL of mirror')

    index.set_defaults(func=update_index)

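
# Note: boto3.Session() picks up credentials from the environment; in these
# CI jobs that means the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY variables
# described in bin/rebuild-package.sh.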
def get_s3_session(endpoint_url):
    if not have_boto3_support:
        raise SpackError('boto3 module not available')

    session = boto3.Session()
    s3 = session.resource('s3')

    bucket_names = []
    for bucket in s3.buckets.all():
        bucket_names.append(bucket.name)

    if len(bucket_names) > 1:
        raise SpackError('More than one bucket associated with credentials')

    if not bucket_names:
        raise SpackError('No bucket associated with credentials')

    bucket_name = bucket_names[0]

    return s3, bucket_name


def update_index(args):
    """Update the index of an s3 buildcache"""
    s3, bucket_name = get_s3_session(args.endpoint_url)

    bucket = s3.Bucket(bucket_name)
    exists = True

    try:
        s3.meta.client.head_bucket(Bucket=bucket_name)
    except botocore.exceptions.ClientError as e:
        # If a client error is thrown, then check that it was a 404 error.
        # If it was a 404 error, then the bucket does not exist.
        error_code = e.response['Error']['Code']
        if error_code == '404':
            exists = False

    if not exists:
        tty.error('S3 bucket "{0}" does not exist'.format(bucket_name))
        sys.exit(1)

    build_cache_dir = os.path.join(
        'mirror', bindist.build_cache_relative_path())

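    # Gather the top-level build cache entries: the relative name of every
    # .spec.yaml file, and the first path component below the build cache
    # for every .spack tarball (illustrative key: "mirror/build_cache/...").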
    spec_yaml_regex = re.compile('{0}/(.+\\.spec\\.yaml)$'.format(
        build_cache_dir))
    spack_regex = re.compile('{0}/([^/]+)/.+\\.spack$'.format(
        build_cache_dir))

    top_level_keys = set()

    for key in bucket.objects.all():
        m = spec_yaml_regex.search(key.key)
        if m:
            top_level_keys.add(m.group(1))
            print(m.group(1))
            continue

        m = spack_regex.search(key.key)
        if m:
            top_level_keys.add(m.group(1))
            print(m.group(1))
            continue

    index_data = {
        'top_level_keys': top_level_keys,
    }

    env = template_engine.make_environment()
    template_dir = 'misc'
    index_template = os.path.join(template_dir, 'buildcache_index.html')
    t = env.get_template(index_template)
    contents = t.render(index_data)

    index_key = os.path.join(build_cache_dir, 'index.html')

    tty.debug('Generated index:')
    tty.debug(contents)
    tty.debug('Pushing it to {0} -> {1}'.format(bucket_name, index_key))

    s3_obj = s3.Object(bucket_name, index_key)
    s3_obj.put(Body=contents, ACL='public-read')


def upload_spec(args):
    """Upload a spec to s3 bucket"""
    if not args.spec and not args.spec_yaml:
        tty.error('Cannot upload spec without spec arg or path to spec yaml')
        sys.exit(1)

    if not args.base_dir:
        tty.error('No base directory for buildcache specified')
        sys.exit(1)

    if args.spec:
        try:
            spec = Spec(args.spec)
            spec.concretize()
        except Exception:
            tty.error('Unable to concretize spec from string {0}'.format(
                args.spec))
            sys.exit(1)
    else:
        try:
            with open(args.spec_yaml, 'r') as fd:
                spec = Spec.from_yaml(fd.read())
        except Exception:
            tty.error('Unable to read spec from yaml {0}'.format(
                args.spec_yaml))
            sys.exit(1)

    s3, bucket_name = get_s3_session(args.endpoint_url)

    build_cache_dir = bindist.build_cache_relative_path()

    tarball_key = os.path.join(
        build_cache_dir, bindist.tarball_path_name(spec, '.spack'))
    tarball_path = os.path.join(args.base_dir, tarball_key)

    specfile_key = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.spec.yaml'))
    specfile_path = os.path.join(args.base_dir, specfile_key)

    cdashidfile_key = os.path.join(
        build_cache_dir, bindist.tarball_name(spec, '.cdashid'))
    cdashidfile_path = os.path.join(args.base_dir, cdashidfile_key)

    tty.msg('Uploading {0}'.format(tarball_key))
    s3.meta.client.upload_file(
        tarball_path, bucket_name,
        os.path.join('mirror', tarball_key),
        ExtraArgs={'ACL': 'public-read'})

    tty.msg('Uploading {0}'.format(specfile_key))
    s3.meta.client.upload_file(
        specfile_path, bucket_name,
        os.path.join('mirror', specfile_key),
        ExtraArgs={'ACL': 'public-read'})

    if os.path.exists(cdashidfile_path):
        tty.msg('Uploading {0}'.format(cdashidfile_key))
        s3.meta.client.upload_file(
            cdashidfile_path, bucket_name,
            os.path.join('mirror', cdashidfile_key),
            ExtraArgs={'ACL': 'public-read'})


def upload_s3(parser, args):
    if args.func:
        args.func(args)