Update elastic

Arnie 2018-01-09 11:51:39 +01:00
parent 82e2decda3
commit 355369978e
3 changed files with 322 additions and 0 deletions

aws-cli/Dockerfile Normal file

@@ -0,0 +1,10 @@
# Base image is parameterized so run.sh can pin the Alpine version at build time
ARG IMAGE_NAME
ARG IMAGE_VERSION
FROM ${IMAGE_NAME}:${IMAGE_VERSION}

# Install the AWS CLI via pip; apk's --no-cache flag already fetches a fresh
# index without storing it, so a separate "apk update" is not needed
RUN apk --no-cache add python py-pip py-setuptools ca-certificates groff less && \
    pip --no-cache-dir install awscli && \
    rm -rf /var/cache/apk/*

# Default directory for volume mounts; run.sh mounts local paths here
WORKDIR /mnt/uploads
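
# A minimal sketch of building this image by hand from this directory (run.sh
# normally does this; the "aws-cli" tag is an arbitrary choice and the build
# args mirror the defaults in run.sh):
#   docker build \
#     --build-arg IMAGE_NAME=alpine \
#     --build-arg IMAGE_VERSION=3.4 \
#     -t aws-cli .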

aws-cli/README.md Normal file

@@ -0,0 +1,109 @@
# AWS CLI Docker Container
[![GitHub forks](https://img.shields.io/github/forks/sekka1/aws-cli-docker.svg)](https://github.com/sekka1/aws-cli-docker/network)
[![GitHub stars](https://img.shields.io/github/stars/sekka1/aws-cli-docker.svg)](https://github.com/sekka1/aws-cli-docker/stargazers)
[![GitHub issues](https://img.shields.io/github/issues/sekka1/aws-cli-docker.svg)](https://github.com/sekka1/aws-cli-docker/issues)
[![Twitter](https://img.shields.io/twitter/url/https/github.com/sekka1/aws-cli-docker.svg?style=social)](https://twitter.com/intent/tweet?text=AWS%20CLI%20in%20a%20%40Docker%20container%20%40AWSCLI:&url=https://github.com/sekka1/aws-cli-docker)
[![Docker Pulls](https://img.shields.io/docker/pulls/garland/aws-cli-docker.svg)](https://hub.docker.com/r/garland/aws-cli-docker/)
[![Docker Stars](https://img.shields.io/docker/stars/garland/aws-cli-docker.svg)](https://hub.docker.com/r/garland/aws-cli-docker/)
Docker container with the AWS CLI installed, based on [Alpine Linux](https://hub.docker.com/_/alpine/). The resulting image is 87 MB.

An automated build of this image is available on Docker Hub: https://hub.docker.com/r/garland/aws-cli-docker/
## Getting your AWS Keys:
[http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html#cli-signup](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-set-up.html#cli-signup)
## Passing your keys into this container via environment variables:
[http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-environment)
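For example, if the keys are already exported in your shell, `docker run` can forward them by name without repeating the values (a sketch; `sts get-caller-identity` is just a cheap call to verify the credentials work):

    export AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>>
    export AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>>
    docker run \
        --env AWS_ACCESS_KEY_ID \
        --env AWS_SECRET_ACCESS_KEY \
        garland/aws-cli-docker \
        aws sts get-caller-identity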
## Command line options for things like setting the region
[http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-command-line](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-command-line)
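For instance, passing `--region` on the command line overrides the `AWS_DEFAULT_REGION` environment variable for a single call (a sketch):

    docker run \
        --env AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>> \
        --env AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>> \
        garland/aws-cli-docker \
        aws --region eu-central-1 ec2 describe-regions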
## You can run any command available in the AWS CLI
[http://docs.aws.amazon.com/cli/latest/index.html](http://docs.aws.amazon.com/cli/latest/index.html)
## Example Usage:
### Describe an instance:
    docker run \
        --env AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>> \
        --env AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>> \
        --env AWS_DEFAULT_REGION=us-east-1 \
        garland/aws-cli-docker \
        aws ec2 describe-instances --instance-ids i-90949d7a
Output:

    {
        "Reservations": [
            {
                "OwnerId": "960288280607",
                "ReservationId": "r-1bb15137",
                "Groups": [],
                "RequesterId": "226008221399",
                "Instances": [
                    {
                        "Monitoring": {
                            "State": "enabled"
                        },
                        "PublicDnsName": null,
                        ...
        ...
    }
### Return a list of items in an S3 bucket

    docker run \
        --env AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>> \
        --env AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>> \
        garland/aws-cli-docker \
        aws s3 ls

Output:

    2014-06-03 19:41:30 folder1
    2014-06-06 23:02:29 folder2
### Upload the contents of your current directory (say it contains two files, _test.txt_ and _test2.txt_) to an S3 bucket

    docker run \
        --env AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>> \
        --env AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>> \
        -v $PWD:/data \
        garland/aws-cli-docker \
        aws s3 sync /data s3://mybucket --dryrun

Output:

    (dryrun) upload: test.txt to s3://mybucket/test.txt
    (dryrun) upload: test2.txt to s3://mybucket/test2.txt
doc: http://docs.aws.amazon.com/cli/latest/reference/s3/index.html
### Retrieve a decrypted Windows password by passing in your private key
We will map the private key that resides on your local system into the container:

    docker run \
        -v <<LOCATION_TO_YOUR_PRIVATE_KEY>>:/tmp/key.pem \
        --env AWS_ACCESS_KEY_ID=<<YOUR_ACCESS_KEY>> \
        --env AWS_SECRET_ACCESS_KEY=<<YOUR_SECRET_ACCESS>> \
        --env AWS_DEFAULT_REGION=us-east-1 \
        garland/aws-cli-docker \
        aws ec2 get-password-data --instance-id <<YOUR_INSTANCE_ID>> --priv-launch-key /tmp/key.pem
Output:

    {
        "InstanceId": "i-90949d7a",
        "Timestamp": "2014-12-11T01:18:27.000Z",
        "PasswordData": "8pa%o?foo"
    }
doc: http://docs.aws.amazon.com/cli/latest/reference/ec2/get-password-data.html

aws-cli/run.sh Executable file

@@ -0,0 +1,203 @@
#!/usr/bin/env bash
SCRIPT_PATH=$( cd "$(dirname "$0")" ; pwd -P )
IMAGE_NAME="alpine"
IMAGE_VERSION="3.4"
SERVICE_NAME=aws-cli
# common.shinc provides the shared helpers used below (__build, __msg, __warn, __err)
source "${SCRIPT_PATH}/../common.shinc"
#
# Project specific variables
#
AWS_DEFAULT_REGION=eu-central-1
CUSTOM_ARGS=""
CURRENT_DATE="$(date +%Y-%m-%d)"
EXPIRES=$(date '+%a, %d %b %Y 00:00:00 GMT' -d "${CURRENT_DATE} + 365 day")
CACHE_MAX_AGE="31536000"
ACL="public-read"
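# env.shinc is expected to export the credentials checked below and is sourced
# quietly so a missing file does not spam stderr, e.g. (values are placeholders):
#   export AWS_ACCESS_KEY_ID=...
#   export AWS_SECRET_ACCESS_KEY=...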
source "${SCRIPT_PATH}/env.shinc" 2> /dev/null
REQUIRED_VARIABLES=(AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY)
for _var in "${REQUIRED_VARIABLES[@]}"; do
    if [ -z "${!_var+x}" ]; then
        echo "Please provide credentials in the env.shinc file; missing definition of variable: ${_var}"
        exit 2
    fi
done
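# init: build the image via __build (from common.shinc) and create a stopped
# container named after the service.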
init() {
    __build
    docker create \
        --name ${SERVICE_NAME} \
        ${SERVICE_NAME}:latest
    [[ $? -ne 0 ]] && return 1
}
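# cmd: run an arbitrary "aws ..." command with a local directory (read from
# stdin, defaulting to the current path) mounted at /mnt/uploads.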
cmd() {
    local localDir
    __msg "Local directory to mount into the container (press enter for the current path):"
    read localDir
    if [[ ${localDir} = "" ]]; then
        localDir="$(pwd -P)"
        __warn "You have not provided a directory, using current path: ${localDir}"
        __msg "Continue? [(y)/n]"
        read CONTINUE
        if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
            return 0
        fi
    fi
    __warn "Mounting ${localDir} to /mnt/uploads inside the docker container"
    docker run --rm \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v ${localDir}:/mnt/uploads \
        ${CUSTOM_ARGS} \
        ${SERVICE_NAME} \
        aws "$@"
}
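# upload <local-dir> <s3-path> [extra aws args]: recursively copy a local
# directory to S3, applying the ACL, Expires and Cache-Control settings above.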
upload() {
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"
    [[ ${source} = "" ]] && __err "You must provide a source directory (filesystem) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    [[ ! -e ${source} ]] && __err "The source does not exist: ${source}" && return 6
    __warn "Uploading ${source} to s3://${target}"
    __msg "Cache expires on ${EXPIRES}"
    docker run --rm \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v ${source}:/mnt/uploads \
        ${SERVICE_NAME} \
        aws s3 cp /mnt/uploads s3://${target} ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
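# download <s3-path> <local-target> [extra aws args]: recursively copy an S3
# path into a local file or directory, creating the target if needed.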
download() {
    local source="$1"
    shift
    local target="$1"
    shift
    local args="$@"
    [[ ${source} = "" ]] && __err "You must provide a source (s3) as the first parameter" && return 137
    [[ ${target} = "" ]] && __err "You must provide a target (filesystem) as the second parameter" && return 137
    __warn "Downloading s3://${source} to ${target}"
    if [[ ! -e ${target} ]]; then
        __warn "The target ${target} does not exist. Is this a file (f) or a directory (d)? ..."
        read type
        if [[ ${type} = "f" ]]; then
            mkdir -p $(dirname ${target})
            touch ${target}
        elif [[ ${type} = "d" ]]; then
            mkdir -p ${target}
        else
            __err "The input is invalid, please use either 'f' or 'd'"
            return 6
        fi
    fi
    docker run --rm \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v ${target}:/mnt/uploads \
        ${SERVICE_NAME} \
        aws s3 cp s3://${source} /mnt/uploads ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
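# migrate <s3-source> <s3-target> [extra aws args]: server-side copy from one
# S3 path to another, refreshing the cache metadata on the way.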
migrate() {
    local source_dir="$1"
    shift
    local target_dir="$1"
    shift
    local args="$@"
    [[ ${source_dir} = "" ]] && __err "You must provide a source directory (s3) as the first parameter" && return 137
    [[ ${target_dir} = "" ]] && __err "You must provide a target directory (s3) as the second parameter" && return 137
    __warn "Migrating s3://${source_dir} to s3://${target_dir}"
    __msg "Cache expires on ${EXPIRES}"
    docker run --rm \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp s3://${source_dir} s3://${target_dir} ${args} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
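# setPermissions <s3-path>: copy an S3 path onto itself to reapply the ACL and
# cache headers to objects that are already uploaded.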
setPermissions() {
    local target_dir="$1"
    if [[ ${target_dir} = "" ]]; then
        __warn "Please provide an s3 path: "
        read target_dir
    fi
    docker run --rm \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        ${SERVICE_NAME} \
        aws s3 cp s3://${target_dir} s3://${target_dir} --recursive --acl "${ACL}" --expires "${EXPIRES}" --cache-control "max-age=${CACHE_MAX_AGE}" --metadata-directive REPLACE
}
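# exec [local-dir]: open an interactive shell inside the container with the
# directory mounted at /mnt/uploads (defaults to the current path). Note that
# this shadows the bash builtin "exec" within this script.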
exec() {
    local source_dir="$1"
    if [[ ${source_dir} = "" ]]; then
        source_dir="$(pwd -P)"
        __warn "You have not provided a directory, using current path: ${source_dir}"
        __msg "Continue? [(y)/n]"
        read CONTINUE
        if [[ ${CONTINUE} != "" ]] && [[ ${CONTINUE} != "y" ]] && [[ ${CONTINUE} != "Y" ]]; then
            return 0
        fi
    fi
    docker run --rm -it \
        -u $(id -u) \
        --env AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \
        --env AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \
        --env AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} \
        -v ${source_dir}:/mnt/uploads \
        ${SERVICE_NAME} \
        sh
}
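# The first CLI argument is dispatched as a function name, i.e. the script is
# invoked as ./run.sh <function> [args]. Examples (bucket names are placeholders):
#   ./run.sh init
#   ./run.sh upload ./public my-bucket/assets
#   ./run.sh download my-bucket/assets ./public
#   ./run.sh migrate my-bucket/old my-bucket/new
#   ./run.sh setPermissions my-bucket/assets
#   ./run.sh exec
#   echo "$PWD" | ./run.sh cmd s3 ls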
"$@"
exit $?