#!/usr/bin/env bash
#
# Docker-based wrapper around the AWS CLI for uploading, downloading and
# migrating S3 content with fixed ACL / cache-control / expiry headers.
#
# Usage: <script> <init|cmd|upload|download|migrate|setPermissions|exec> [args...]
# Requires: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY (usually via env.shinc),
#           docker, and GNU date (for the relative "-d" expiry computation).

SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="alpine"
IMAGE_VERSION="3.4"
SERVICE_NAME=aws-cli

# Provides __build, __msg, __warn and __err helpers.
source "${SCRIPT_PATH}/../common.shinc"

#
# Project specific variables
#
AWS_DEFAULT_REGION=eu-central-1
CUSTOM_ARGS=""
CURRENT_DATE="$(date +%Y-%m-%d)"
# HTTP Expires header one year from today (GNU date extension).
EXPIRES=$(date '+%a, %d %b %Y 00:00:00 GMT' -d "${CURRENT_DATE} + 365 day")
CACHE_MAX_AGE="31536000"
ACL="public-read"

# Optional local overrides / credentials; a missing file is tolerated on purpose.
source "${SCRIPT_PATH}/env.shinc" 2> /dev/null

REQUIRED_VARIABLES=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY)
for _var in "${REQUIRED_VARIABLES[@]}"; do
  # ${!_var+x} is empty only when the variable named by $_var is unset.
  if [[ -z ${!_var+x} ]]; then
    echo "Please provide credential in env.shinc file, missing definition of variable: ${_var}"
    exit 2
  fi
done

# Build the docker image; returns non-zero when the build fails.
init() {
  __build
  [[ $? -ne 0 ]] && return 1
}

# Run an arbitrary `aws …` command inside the container.
# Reads the directory to mount at /mnt/uploads from stdin; falls back to the
# current directory after confirmation. Remaining arguments go to `aws`.
cmd() {
  local localDir
  read -r localDir
  if [[ -z ${localDir} ]]; then
    localDir="$(pwd -P)"
    __warn "You have not provided a directory, using current path: ${localDir}"
    __msg "Continue? [(y)/n]"
    read -r CONTINUE
    if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
      return 0
    fi
  fi
  __warn "Mounting ${localDir} to /mnt/uploads inside the docker container"
  # CUSTOM_ARGS is deliberately unquoted: it may hold several docker options.
  # shellcheck disable=SC2086
  docker run --rm \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    -v "${localDir}":/mnt/uploads \
    ${CUSTOM_ARGS} \
    "${SERVICE_NAME}" \
    aws "$@"
}

# upload <local-dir> <s3-path> [extra aws-s3-cp args...]
# Recursively copies a local directory to S3 with the project's ACL,
# Expires and Cache-Control headers.
upload() {
  local source="$1"
  shift
  local target="$1"
  shift
  # Keep remaining options as an array so quoted arguments survive intact.
  local args=("$@")
  [[ -z ${source} ]] && __err "You must provide a source directory (filesystem) as the first parameter" && return 137
  [[ -z ${target} ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
  [[ ! -e ${source} ]] && __err "The source does not exist: ${source}" && return 6
  __warn "Uploading ${source} to s3://${target}"
  __msg "Cache expires on ${EXPIRES}"
  docker run --rm \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    -v "${source}":/mnt/uploads \
    "${SERVICE_NAME}" \
    aws s3 cp /mnt/uploads "s3://${target}" "${args[@]}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# download <s3-path> <local-path> [extra aws-s3-cp args...]
# Recursively copies an S3 path into a local file or directory, creating the
# local target (after asking whether it is a file or a directory) if needed.
download() {
  local source="$1"
  shift
  local target="$1"
  shift
  local args=("$@")
  [[ -z ${source} ]] && __err "You must provide a source (filesystem) as the first parameter" && return 137
  [[ -z ${target} ]] && __err "You must provide a target (s3) as the second parameter" && return 137
  __warn "Downloading s3://${source} to ${target}"
  if [[ ! -e ${target} ]]; then
    __warn "The target ${target} does not exist. Is this a file (f) or a directory (d)? ..."
    read -r type
    if [[ ${type} = "f" ]]; then
      mkdir -p "$(dirname "${target}")"
      touch "${target}"
    elif [[ ${type} = "d" ]]; then
      mkdir -p "${target}"
    else
      # Abort: without a valid target the docker mount below cannot work.
      __err "The input is invalid, please use either 'f' or 'd'"
      return 22
    fi
  fi
  docker run --rm \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    -v "${target}":/mnt/uploads \
    "${SERVICE_NAME}" \
    aws s3 cp "s3://${source}" /mnt/uploads "${args[@]}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# migrate <s3-source-dir> <s3-target-dir> [extra aws-s3-cp args...]
# Server-side recursive copy between two S3 locations, refreshing the
# ACL / Expires / Cache-Control metadata on the way.
migrate() {
  local source_dir="$1"
  shift
  local target_dir="$1"
  shift
  local args=("$@")
  [[ -z ${source_dir} ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
  [[ -z ${target_dir} ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
  # Fixed: message previously referenced undefined ${source}/${target}.
  __warn "Migrating s3://${source_dir} to s3://${target_dir}"
  echo "Cache expires on ${EXPIRES}"
  docker run --rm \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    "${SERVICE_NAME}" \
    aws s3 cp "s3://${source_dir}" "s3://${target_dir}" "${args[@]}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# setPermissions [s3-path]
# Re-applies the project ACL and cache headers in place by copying an S3
# path onto itself with --metadata-directive REPLACE. Prompts when no path
# is given.
setPermissions() {
  local target_dir="$1"
  if [[ -z ${target_dir} ]]; then
    __warn "Please provide an s3 path: "
    read -r target_dir
  fi
  docker run --rm \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    "${SERVICE_NAME}" \
    aws s3 cp "s3://${target_dir}" "s3://${target_dir}" --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}

# exec [local-dir]
# Opens an interactive shell inside the container with the given (or, after
# confirmation, the current) directory mounted at /mnt/uploads.
# NOTE: the name shadows the `exec` builtin, but it is part of the script's
# subcommand interface (dispatched via "$@" below), so it is kept.
exec() {
  local source_dir="$1"
  if [[ -z ${source_dir} ]]; then
    # Fixed: previously referenced undefined ${localDir} and never assigned
    # source_dir, which broke the -v mount below.
    source_dir="$(pwd -P)"
    __warn "You have not provided a directory, using current path: ${source_dir}"
    __msg "Continue? [(y)/n]"
    read -r CONTINUE
    if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
      return 0
    fi
  fi
  docker run --rm -it \
    -u "$(id -u)" \
    --env AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
    --env AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
    --env AWS_DEFAULT_REGION="${AWS_DEFAULT_REGION}" \
    -v "${source_dir}":/mnt/uploads \
    "${SERVICE_NAME}" \
    sh
}

# Dispatch: first CLI argument names the function to run, the rest are its args.
"$@"
exit $?