diff --git a/Dockerfile b/Dockerfile
index 661bed4397aa4..23a4c5c656800 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -38,9 +38,11 @@ RUN echo deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty main > /etc/apt/s
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 	apparmor \
+	apt-utils \
 	aufs-tools \
 	automake \
 	bash-completion \
+	bsdmainutils \
 	btrfs-tools \
 	build-essential \
 	clang-3.8 \
@@ -64,12 +66,12 @@ RUN apt-get update && apt-get install -y \
 	python-mock \
 	python-pip \
 	python-websocket \
-	s3cmd=1.5.0* \
 	ubuntu-zfs \
 	xfsprogs \
 	libzfs-dev \
 	tar \
 	--no-install-recommends \
+	&& pip install awscli==1.10.15 \
 	&& ln -snf /usr/bin/clang-3.8 /usr/local/bin/clang \
 	&& ln -snf /usr/bin/clang++-3.8 /usr/local/bin/clang++
 
@@ -187,13 +189,6 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
 	&& pip install -r test-requirements.txt
 
-# Setup s3cmd config
-RUN { \
-	echo '[default]'; \
-	echo 'access_key=$AWS_ACCESS_KEY'; \
-	echo 'secret_key=$AWS_SECRET_KEY'; \
-	} > ~/.s3cfg
-
 # Set user.email so crosbymichael's in-container merge commits go smoothly
 RUN git config --global user.email 'docker-dummy@example.com'
 
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 85c1e7ec2dd61..3d50968eec8b6 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -136,13 +136,6 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
 	&& pip install -r test-requirements.txt
 
-# Setup s3cmd config
-RUN { \
-	echo '[default]'; \
-	echo 'access_key=$AWS_ACCESS_KEY'; \
-	echo 'secret_key=$AWS_SECRET_KEY'; \
-	} > ~/.s3cfg
-
 # Set user.email so crosbymichael's in-container merge commits go smoothly
 RUN git config --global user.email 'docker-dummy@example.com'
 
diff --git a/hack/install.sh b/hack/install.sh
index 218ad99c8c560..d4c2ef4f17bd1 100755
--- a/hack/install.sh
+++ b/hack/install.sh
@@ -20,7 +20,7 @@ set -e
 # To update this script on https://get.docker.com,
 # use hack/release.sh during a normal release,
 # or the following one-liner for script hotfixes:
-#   s3cmd put --acl-public -P hack/install.sh s3://get.docker.com/index
+#   aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
 #
 
 url="https://get.docker.com/"
diff --git a/hack/make/.integration-daemon-start b/hack/make/.integration-daemon-start
index ba466b7c001e4..ab4c8aaec19e9 100644
--- a/hack/make/.integration-daemon-start
+++ b/hack/make/.integration-daemon-start
@@ -9,6 +9,13 @@ if ! command -v docker &> /dev/null; then
 	false
 fi
 
+if [ -z "$DOCKER_TEST_HOST" ]; then
+	if docker version &> /dev/null; then
+		echo >&2 'skipping daemon start, since daemon appears to be already started'
+		return
+	fi
+fi
+
 # intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers
 exec 41>&1 42>&2
 
diff --git a/hack/make/release-deb b/hack/make/release-deb
index 9b0b3ca02a979..946e5de813170 100755
--- a/hack/make/release-deb
+++ b/hack/make/release-deb
@@ -14,6 +14,7 @@ set -e
 #
 # ... and so on and so forth for the builds created by hack/make/build-deb
 
+source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
 source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
 
 : ${DOCKER_RELEASE_DIR:=$DEST}
diff --git a/hack/make/release-rpm b/hack/make/release-rpm
index 5ed25cbe190c8..b952b79e06452 100755
--- a/hack/make/release-rpm
+++ b/hack/make/release-rpm
@@ -14,6 +14,7 @@ set -e
 #
 # ... and so on and so forth for the builds created by hack/make/build-rpm
 
+source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
 source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
 
 : ${DOCKER_RELEASE_DIR:=$DEST}
diff --git a/hack/release.sh b/hack/release.sh
index e9a239c1e0aa4..b00a500fecd3c 100755
--- a/hack/release.sh
+++ b/hack/release.sh
@@ -43,7 +43,7 @@ cd /go/src/github.com/docker/docker
 [ -x hack/make.sh ] || usage
 
 export AWS_DEFAULT_REGION
-: ${AWS_DEFAULT_REGION:=us-west-2}
+: ${AWS_DEFAULT_REGION:=us-west-1}
 
 RELEASE_BUNDLES=(
 	binary
@@ -79,8 +79,6 @@ fi
 
 setup_s3() {
 	echo "Setting up S3"
-	# TODO: Move to Dockerfile
-	pip install awscli==1.10.15
 	# Try creating the bucket. Ignore errors (it might already exist).
 	aws s3 mb "s3://$BUCKET" 2>/dev/null || true
 	# Check access to the bucket.
@@ -104,8 +102,7 @@ s3_url() {
 			echo "https://$BUCKET_PATH"
 			;;
 		*)
-			# TODO: remove s3cmd dependency
-			BASE_URL=$( s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }' )
+			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
 			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
 				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
 			else
diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md
index 2b0e7caae3b34..b9dcf7f4eaae4 100644
--- a/project/RELEASE-CHECKLIST.md
+++ b/project/RELEASE-CHECKLIST.md
@@ -270,8 +270,9 @@ docker build -t docker .
 # static binaries are still pushed to s3
 docker run \
 	-e AWS_S3_BUCKET=test.docker.com \
-	-e AWS_ACCESS_KEY \
-	-e AWS_SECRET_KEY \
+	-e AWS_ACCESS_KEY_ID \
+	-e AWS_SECRET_ACCESS_KEY \
+	-e AWS_DEFAULT_REGION \
 	-i -t --privileged \
 	docker \
 	hack/release.sh
@@ -434,8 +435,9 @@ docker build -t docker .
 # static binaries are still pushed to s3
 docker run \
 	-e AWS_S3_BUCKET=get.docker.com \
-	-e AWS_ACCESS_KEY \
-	-e AWS_SECRET_KEY \
+	-e AWS_ACCESS_KEY_ID \
+	-e AWS_SECRET_ACCESS_KEY \
+	-e AWS_DEFAULT_REGION \
 	-i -t --privileged \
 	docker \
 	hack/release.sh