Console Output
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.XLke5RqBKB
+ cat
+ chmod +x /tmp/tmp.XLke5RqBKB
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.XLke5RqBKB openshiftdevel:/tmp/tmp.XLke5RqBKB
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.XLke5RqBKB"'
+ cd /home/origin
++ jq --compact-output '.buildid |= "975"'
+ JOB_SPEC='{"type":"presubmit","job":"ci-kubernetes-aws-actuator","buildid":"975","prowjobid":"54633cdd-cf1f-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-api-provider-aws","repo_link":"https://github.com/openshift/cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","base_link":"https://github.com/openshift/cluster-api-provider-aws/commit/d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"ingvagabund","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f","link":"https://github.com/openshift/cluster-api-provider-aws/pull/258","commit_link":"https://github.com/openshift/cluster-api-provider-aws/pull/258/commits/6fc8f0a260389bc1798d5bc6fea49264a324669f","author_link":"https://github.com/ingvagabund"}]}}'
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ...
latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs
1160f4abea84: Pulling fs layer
be60dbe7622d: Pulling fs layer
d26b76701841: Pulling fs layer
1b90cab916ea: Pulling fs layer
3a00cbb24bdb: Pulling fs layer
1b90cab916ea: Waiting
3a00cbb24bdb: Waiting
be60dbe7622d: Verifying Checksum
be60dbe7622d: Download complete
1160f4abea84: Download complete
d26b76701841: Verifying Checksum
d26b76701841: Download complete
3a00cbb24bdb: Verifying Checksum
3a00cbb24bdb: Download complete
1b90cab916ea: Verifying Checksum
1b90cab916ea: Download complete
1160f4abea84: Pull complete
be60dbe7622d: Pull complete
d26b76701841: Pull complete
1b90cab916ea: Pull complete
3a00cbb24bdb: Pull complete
Digest: sha256:d68e1c6c2de5c1167a79b24d5ba4f909349ca7a44fb634e214bdadc2c8b010cd
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest
+ break
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
8f72556ef119: Pulling fs layer
8e5b170ec95b: Pulling fs layer
8e5b170ec95b: Waiting
a073c86ecf9e: Download complete
cc3fc741b1a9: Verifying Checksum
cc3fc741b1a9: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
8f72556ef119: Verifying Checksum
8f72556ef119: Download complete
a073c86ecf9e: Pull complete
cc3fc741b1a9: Pull complete
8f72556ef119: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:e651a6455ada7c070c439eddcd753e2e2ac1fb934c4f2a526c37a4674c8eaee4
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
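The pull loop above retries each Prow utility image up to five times before giving up. A minimal bash sketch of that pattern, using the image names and retry bound from this log (any back-off delay is omitted because the script does not show one):
for image in 'registry.svc.ci.openshift.org/ci/clonerefs:latest' 'registry.svc.ci.openshift.org/ci/initupload:latest'; do
    for (( i = 0; i < 5; i++ )); do
        # stop retrying this image as soon as one pull succeeds
        docker pull "${image}" && break
    done
done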
+ clonerefs_args=
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,cluster-api-provider-aws=master:d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41,258:6fc8f0a260389bc1798d5bc6fea49264a324669f
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f"}]},"time":"2019-09-04T14:25:32Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-aws","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T14:25:32Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-aws/.git/\n","time":"2019-09-04T14:25:32Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T14:25:32Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T14:25:32Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-aws.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-aws\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.1.0 -\u003e v0.1.0\n * [new tag] v0.2.0 -\u003e v0.2.0\n","time":"2019-09-04T14:25:36Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-aws.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-aws\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T14:25:36Z"}
{"command":"git checkout d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'd4644b21c9dbeeb79215ad42e8d7f932dd3a5f41'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at d4644b21... Merge pull request #256 from mgugino-upstream-stage/fix-invalid-delete\n","time":"2019-09-04T14:25:36Z"}
{"command":"git branch --force master d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T14:25:36Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T14:25:37Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-aws.git pull/258/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-aws\n * branch refs/pull/258/head -\u003e FETCH_HEAD\n","time":"2019-09-04T14:25:37Z"}
{"command":"git merge --no-ff 6fc8f0a260389bc1798d5bc6fea49264a324669f","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Merge made by the 'recursive' strategy.\n Makefile | 3 ++-\n pkg/apis/awsproviderconfig/v1beta1/zz_generated.deepcopy.go | 2 +-\n pkg/client/mock/client_generated.go | 3 ++-\n 3 files changed, 5 insertions(+), 3 deletions(-)\n","time":"2019-09-04T14:25:37Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T14:25:37Z"}
{"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-09-04T14:25:37Z"}
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"ci-kubernetes-aws-actuator","buildid":"975","prowjobid":"54633cdd-cf1f-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-api-provider-aws","repo_link":"https://github.com/openshift/cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","base_link":"https://github.com/openshift/cluster-api-provider-aws/commit/d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"ingvagabund","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f","link":"https://github.com/openshift/cluster-api-provider-aws/pull/258","commit_link":"https://github.com/openshift/cluster-api-provider-aws/pull/258/commits/6fc8f0a260389bc1798d5bc6fea49264a324669f","author_link":"https://github.com/ingvagabund"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/started.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/975.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:25:39Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/975.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:25:40Z"}
{"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T14:25:40Z"}
+ sudo chmod -R a+rwX /data
+ sudo chown -R origin:origin-git /data
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 22s] ##########
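The SYNC REPOSITORIES stage is Prow's clonerefs plus initupload: clonerefs materializes the PR merge commit under /data, and initupload pushes the clone records and job metadata to GCS. Stripped of the container wrapper, the git sequence recorded in the clone log above is roughly:
mkdir -p /data/src/github.com/openshift/cluster-api-provider-aws
cd /data/src/github.com/openshift/cluster-api-provider-aws
git init
git fetch https://github.com/openshift/cluster-api-provider-aws.git --tags --prune
git fetch https://github.com/openshift/cluster-api-provider-aws.git master
git checkout d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41            # base sha of master
git branch --force master d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41
git checkout master
git fetch https://github.com/openshift/cluster-api-provider-aws.git pull/258/head
git merge --no-ff 6fc8f0a260389bc1798d5bc6fea49264a324669f        # head of PR 258
git submodule update --init --recursive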
[workspace] $ /bin/bash /tmp/jenkins8236251764415298432.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=ci-kubernetes-aws-actuator'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=975'\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ##########
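This stage appends JOB_NAME and BUILD_NUMBER to /etc/environment on the remote host so the later `bash -l` invocations see them as login-time environment. A hedged sketch of reading them back inside one of those login shells, assuming the file holds only simple KEY=value lines (which is what was appended above):
# on openshiftdevel, inside a login shell
source /etc/environment
echo "${JOB_NAME} build ${BUILD_NUMBER}"   # ci-kubernetes-aws-actuator build 975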
[workspace] $ /bin/bash /tmp/jenkins1990493949780136937.sh
########## STARTING STAGE: UPLOAD THE DEFAULT AWS CREDENTIALS ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'mkdir -p ~/.aws'
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.aws/credentials 'openshiftdevel:~/.aws'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'mkdir -p ~/.ssh'
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.ssh/devenv.pem 'openshiftdevel:~/.ssh/devenv.pem'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: UPLOAD THE DEFAULT AWS CREDENTIALS [00h 00m 02s] ##########
[workspace] $ /bin/bash /tmp/jenkins4702004125050890374.sh
########## STARTING STAGE: INSTALL MINIKUBE ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.kmpZdVRdxK
+ cat
+ chmod +x /tmp/tmp.kmpZdVRdxK
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.kmpZdVRdxK openshiftdevel:/tmp/tmp.kmpZdVRdxK
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.kmpZdVRdxK"'
+ cd /home/origin
+ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
53 40.3M 53 21.7M 0 0 17.3M 0 0:00:02 0:00:01 0:00:01 17.4M
100 40.3M 100 40.3M 0 0 28.1M 0 0:00:01 0:00:01 --:--:-- 28.1M
+ chmod +x minikube
+ sudo mv minikube /usr/bin/
+ curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
100 51.7M 100 51.7M 0 0 68.7M 0 --:--:-- --:--:-- --:--:-- 68.8M
+ chmod +x kubectl
+ sudo mv kubectl /usr/bin/
+ curl -Lo crictl-v1.12.0-linux-amd64.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
100 623 0 623 0 0 3099 0 --:--:-- --:--:-- --:--:-- 3099
100 7237k 100 7237k 0 0 23.4M 0 --:--:-- --:--:-- --:--:-- 23.4M
+ tar -xvf crictl-v1.12.0-linux-amd64.tar.gz
crictl
+ sudo mv crictl /usr/bin/
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 00m 04s] ##########
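The INSTALL MINIKUBE stage is three static binary downloads; condensed, with the same versions and URLs used above:
curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
chmod +x minikube && sudo mv minikube /usr/bin/
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
chmod +x kubectl && sudo mv kubectl /usr/bin/
curl -Lo crictl-v1.12.0-linux-amd64.tar.gz https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz
tar -xvf crictl-v1.12.0-linux-amd64.tar.gz && sudo mv crictl /usr/bin/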
[workspace] $ /bin/bash /tmp/jenkins4383539577637293953.sh
########## STARTING STAGE: DEPLOY KUBERNETES ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.kG8zMBzG3N
+ cat
+ chmod +x /tmp/tmp.kG8zMBzG3N
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.kG8zMBzG3N openshiftdevel:/tmp/tmp.kG8zMBzG3N
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.kG8zMBzG3N"'
+ cd /home/origin
+ sudo setenforce 0
+ sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5
There is a newer version of minikube available (v1.3.1). Download it here:
https://github.com/kubernetes/minikube/releases/tag/v1.3.1
To disable this notification, run the following:
minikube config set WantUpdateNotification false
Starting local Kubernetes v1.12.0 cluster...
Starting VM...
Creating CA: /root/.minikube/certs/ca.pem
Creating client certificate: /root/.minikube/certs/cert.pem
Getting VM IP address...
Moving files into cluster...
Downloading kubeadm v1.12.0
Downloading kubelet v1.12.0
Finished Downloading kubeadm v1.12.0
Finished Downloading kubelet v1.12.0
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks
When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions. An example of this is below:
sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true
Loading cached images from config file.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 01m 03s] ##########
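DEPLOY KUBERNETES starts a single-node v1.12.0 cluster with minikube's none driver, so kubeadm and the kubelet run directly on the host as root. A sketch of the same start, adding the CHANGE_MINIKUBE_NONE_USER knob the warning above mentions so kubeconfig ownership is fixed automatically (that variable is optional and is not set in this run):
sudo setenforce 0
export CHANGE_MINIKUBE_NONE_USER=true   # optional; this job instead leaves the files root-owned
sudo -E minikube start --vm-driver=none \
    --extra-config=kubelet.cgroup-driver=systemd \
    --kubernetes-version v1.12.0 --v 5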
[workspace] $ /bin/bash /tmp/jenkins707550653227925038.sh
########## STARTING STAGE: INSTALL GO 1.10.1 ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.Vcz4jNnyHz
+ cat
+ chmod +x /tmp/tmp.Vcz4jNnyHz
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.Vcz4jNnyHz openshiftdevel:/tmp/tmp.Vcz4jNnyHz
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.Vcz4jNnyHz"'
+ cd /home/origin
+ mkdir -p /home/origin/bin
+ curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x /home/origin/bin/gimme
+ gimme 1.10.1
unset GOOS;
unset GOARCH;
export GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64';
export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}";
go version >&2;
export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env"
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 07s] ##########
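gimme only prints the export statements for the requested toolchain; the job stores them in an env file and sources it again in the build stage below. A condensed sketch of the same install and activation:
mkdir -p /home/origin/bin
curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x /home/origin/bin/gimme
gimme 1.10.1                                     # writes /home/origin/.gimme/envs/go1.10.1.env
source /home/origin/.gimme/envs/go1.10.1.env     # sets GOROOT and prepends its bin to PATH
go version                                       # go version go1.10.1 linux/amd64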
[workspace] $ /bin/bash /tmp/jenkins6010614290113494595.sh
########## STARTING STAGE: BUILD THE MACHINE CONTROLLER ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.7wVavYEBGa
+ cat
+ chmod +x /tmp/tmp.7wVavYEBGa
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.7wVavYEBGa openshiftdevel:/tmp/tmp.7wVavYEBGa
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.7wVavYEBGa"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-aws
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ sed -i 's/FROM registry\.svc\.ci\.openshift\.org\/openshift\/origin-v4\.0:base/FROM docker\.io\/gofed\/base:baseci/' Dockerfile
+ sudo make images NO_DOCKER=1
imagebuilder -t "origin-aws-machine-controllers:v0.2.0-173-gb8c169e" -t "origin-aws-machine-controllers:latest" ./
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ...
--> Pulled 0/2 layers, 12% complete
--> Pulled 1/2 layers, 56% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/sigs.k8s.io/cluster-api-provider-aws
--> COPY . .
--> RUN unset VERSION && GOPROXY=off NO_DOCKER=1 make build
go build -o "bin/machine-controller-manager" \
-ldflags "-X sigs.k8s.io/cluster-api-provider-aws/pkg/version.Raw=v0.2.0-173-gb8c169e -extldflags "-static"" "sigs.k8s.io/cluster-api-provider-aws/cmd/manager"
go build -o bin/manager -ldflags '-extldflags "-static"' \
"sigs.k8s.io/cluster-api-provider-aws/vendor/github.com/openshift/cluster-api/cmd/manager"
--> Image docker.io/gofed/base:baseci was not found, pulling ...
--> Pulled 1/2 layers, 81% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM docker.io/gofed/base:baseci as 1
--> RUN INSTALL_PKGS=" openssh " && yum install -y $INSTALL_PKGS && rpm -V $INSTALL_PKGS && yum clean all
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
* base: mirrors.advancedhosters.com
* extras: mirrors.advancedhosters.com
* updates: mirrors.advancedhosters.com
Resolving Dependencies
--> Running transaction check
---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed
--> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64
--> Running transaction check
---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed
--> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64
--> Running transaction check
---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
Package Arch Version Repository Size
================================================================================
Installing:
openssh x86_64 7.4p1-16.el7 base 510 k
Installing for dependencies:
fipscheck x86_64 1.4.1-6.el7 base 21 k
fipscheck-lib x86_64 1.4.1-6.el7 base 11 k
Transaction Summary
================================================================================
Install 1 Package (+2 Dependent packages)
Total download size: 542 k
Installed size: 2.0 M
Downloading packages:
--------------------------------------------------------------------------------
Total 683 kB/s | 542 kB 00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : fipscheck-1.4.1-6.el7.x86_64 1/3
Installing : fipscheck-lib-1.4.1-6.el7.x86_64 2/3
Installing : openssh-7.4p1-16.el7.x86_64 3/3
Verifying : fipscheck-lib-1.4.1-6.el7.x86_64 1/3
Verifying : fipscheck-1.4.1-6.el7.x86_64 2/3
Verifying : openssh-7.4p1-16.el7.x86_64 3/3
Installed:
openssh.x86_64 0:7.4p1-16.el7
Dependency Installed:
fipscheck.x86_64 0:1.4.1-6.el7 fipscheck-lib.x86_64 0:1.4.1-6.el7
Complete!
Loaded plugins: fastestmirror, ovl
Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates
Cleaning up list of fastest mirrors
--> COPY --from=builder /go/src/sigs.k8s.io/cluster-api-provider-aws/bin/manager /
--> COPY --from=builder /go/src/sigs.k8s.io/cluster-api-provider-aws/bin/machine-controller-manager /
--> Committing changes to origin-aws-machine-controllers:v0.2.0-173-gb8c169e ...
--> Tagged as origin-aws-machine-controllers:latest
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD THE MACHINE CONTROLLER [00h 01m 56s] ##########
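BUILD THE MACHINE CONTROLLER rewrites the Dockerfile base image to one reachable from this host, then drives imagebuilder through the repo Makefile with NO_DOCKER=1. The essential commands, as run above:
export GOPATH=/data
cd /data/src/github.com/openshift/cluster-api-provider-aws
go get -u github.com/openshift/imagebuilder/cmd/imagebuilder && sudo mv /data/bin/imagebuilder /usr/bin
# swap the internal origin-v4.0 base image for a public one
sed -i 's/FROM registry\.svc\.ci\.openshift\.org\/openshift\/origin-v4\.0:base/FROM docker\.io\/gofed\/base:baseci/' Dockerfile
sudo make images NO_DOCKER=1   # tags origin-aws-machine-controllers:latest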
[workspace] $ /bin/bash /tmp/jenkins6148646408611005647.sh
########## STARTING STAGE: CREATE CLUSTER RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.PSf7psfbst
+ cat
+ chmod +x /tmp/tmp.PSf7psfbst
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.PSf7psfbst openshiftdevel:/tmp/tmp.PSf7psfbst
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.PSf7psfbst"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-aws/
+ sudo pip install awscli
Collecting awscli
Downloading https://files.pythonhosted.org/packages/3e/02/5b191b339489e492f3f02dbf1f3b9b586872e22183e22e99f7eda076fc89/awscli-1.16.231-py2.py3-none-any.whl (2.0MB)
Collecting colorama<=0.3.9,>=0.2.5 (from awscli)
Downloading https://files.pythonhosted.org/packages/db/c8/7dcf9dbcb22429512708fe3a547f8b6101c0d02137acbd892505aee57adf/colorama-0.3.9-py2.py3-none-any.whl
Collecting docutils<0.16,>=0.10 (from awscli)
Downloading https://files.pythonhosted.org/packages/3a/dc/bf2b15d1fa15a6f7a9e77a61b74ecbbae7258558fcda8ffc9a6638a6b327/docutils-0.15.2-py2-none-any.whl (548kB)
Requirement already satisfied (use --upgrade to upgrade): PyYAML<=5.2,>=3.10; python_version != "2.6" in /usr/lib64/python2.7/site-packages (from awscli)
Collecting rsa<=3.5.0,>=3.1.2 (from awscli)
Downloading https://files.pythonhosted.org/packages/e1/ae/baedc9cb175552e95f3395c43055a6a5e125ae4d48a1d7a924baca83e92e/rsa-3.4.2-py2.py3-none-any.whl (46kB)
Collecting s3transfer<0.3.0,>=0.2.0 (from awscli)
Downloading https://files.pythonhosted.org/packages/16/8a/1fc3dba0c4923c2a76e1ff0d52b305c44606da63f718d14d3231e21c51b0/s3transfer-0.2.1-py2.py3-none-any.whl (70kB)
Collecting botocore==1.12.221 (from awscli)
Downloading https://files.pythonhosted.org/packages/1f/1d/53e32d29f441d36faa31790776603f8e20573f5583bac00e656e26ae1b69/botocore-1.12.221-py2.py3-none-any.whl (5.7MB)
Requirement already satisfied (use --upgrade to upgrade): pyasn1>=0.1.3 in /usr/lib/python2.7/site-packages (from rsa<=3.5.0,>=3.1.2->awscli)
Collecting futures<4.0.0,>=2.2.0; python_version == "2.6" or python_version == "2.7" (from s3transfer<0.3.0,>=0.2.0->awscli)
Downloading https://files.pythonhosted.org/packages/d8/a6/f46ae3f1da0cd4361c344888f59ec2f5785e69c872e175a748ef6071cdb5/futures-3.3.0-py2-none-any.whl
Requirement already satisfied (use --upgrade to upgrade): jmespath<1.0.0,>=0.7.1 in /usr/lib/python2.7/site-packages (from botocore==1.12.221->awscli)
Collecting python-dateutil<3.0.0,>=2.1; python_version >= "2.7" (from botocore==1.12.221->awscli)
Downloading https://files.pythonhosted.org/packages/41/17/c62faccbfbd163c7f57f3844689e3a78bae1f403648a6afb1d0866d87fbb/python_dateutil-2.8.0-py2.py3-none-any.whl (226kB)
Collecting urllib3<1.26,>=1.20; python_version == "2.7" (from botocore==1.12.221->awscli)
Downloading https://files.pythonhosted.org/packages/e6/60/247f23a7121ae632d62811ba7f273d0e58972d75e58a94d329d51550a47d/urllib3-1.25.3-py2.py3-none-any.whl (150kB)
Requirement already satisfied (use --upgrade to upgrade): six>=1.5 in /usr/lib/python2.7/site-packages (from python-dateutil<3.0.0,>=2.1; python_version >= "2.7"->botocore==1.12.221->awscli)
Installing collected packages: colorama, docutils, rsa, futures, python-dateutil, urllib3, botocore, s3transfer, awscli
Found existing installation: python-dateutil 1.5
Uninstalling python-dateutil-1.5:
Successfully uninstalled python-dateutil-1.5
Found existing installation: urllib3 1.10.2
Uninstalling urllib3-1.10.2:
Successfully uninstalled urllib3-1.10.2
Successfully installed awscli-1.16.231 botocore-1.12.221 colorama-0.3.9 docutils-0.15.2 futures-3.3.0 python-dateutil-2.8.0 rsa-3.4.2 s3transfer-0.2.1 urllib3-1.25.3
You are using pip version 8.1.2, however version 19.2.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
+ curl https://releases.hashicorp.com/terraform/0.11.8/terraform_0.11.8_linux_amd64.zip -o terraform_0.11.8_linux_amd64.zip
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
100 17.0M 100 17.0M 0 0 49.3M 0 --:--:-- --:--:-- --:--:-- 49.2M
+ unzip terraform_0.11.8_linux_amd64.zip
Archive: terraform_0.11.8_linux_amd64.zip
inflating: terraform
+ sudo cp ./terraform /usr/bin/.
/data/src/github.com/openshift/cluster-api-provider-aws/hack /data/src/github.com/openshift/cluster-api-provider-aws
+ export CLUSTER_ID=pr-975-258
+ CLUSTER_ID=pr-975-258
+ pushd hack
+ set +x
+ AWS_DEFAULT_REGION=us-east-1
+ ENVIRONMENT_ID=pr-975-258
+ ./aws-provision.sh install
/data/src/github.com/openshift/cluster-api-provider-aws/hack/prebuild /data/src/github.com/openshift/cluster-api-provider-aws/hack
*** starting terraform with TF_VAR_environment_id=pr-975-258
Initializing modules...
- module.vpc
Found version 1.66.0 of terraform-aws-modules/vpc/aws on registry.terraform.io
Getting source "terraform-aws-modules/vpc/aws"
Initializing provider plugins...
- Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "aws" (2.26.0)...
The following providers do not have any version constraints in configuration,
so the latest version was installed.
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
* provider.aws: version = "~> 2.26"
Terraform has been successfully initialized!
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
data.aws_region.current: Refreshing state...
data.aws_availability_zones.azs: Refreshing state...
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ aws_iam_instance_profile.test_profile
id: <computed>
arn: <computed>
create_date: <computed>
name: "pr-975-258-worker-profile"
path: "/"
role: "pr-975-258-role"
roles.#: <computed>
unique_id: <computed>
+ aws_iam_role.role
id: <computed>
arn: <computed>
assume_role_policy: "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Effect\": \"Allow\",\n \"Sid\": \"\"\n }\n ]\n}\n"
create_date: <computed>
force_detach_policies: "false"
max_session_duration: "3600"
name: "pr-975-258-role"
path: "/"
unique_id: <computed>
+ aws_security_group.cluster_default
id: <computed>
arn: <computed>
description: "pr-975-258 default security policy"
egress.#: <computed>
ingress.#: <computed>
name: "pr-975-258-default"
owner_id: <computed>
revoke_rules_on_delete: "false"
tags.%: "1"
tags.Name: "pr-975-258-worker-sg"
vpc_id: "${module.vpc.vpc_id}"
+ aws_security_group_rule.allow_all
id: <computed>
cidr_blocks.#: "1"
cidr_blocks.0: "0.0.0.0/0"
description: "SSH"
from_port: "22"
protocol: "tcp"
security_group_id: "${aws_security_group.cluster_default.id}"
self: "false"
source_security_group_id: <computed>
to_port: "22"
type: "ingress"
+ aws_security_group_rule.default_egress
id: <computed>
cidr_blocks.#: "1"
cidr_blocks.0: "0.0.0.0/0"
from_port: "0"
protocol: "-1"
security_group_id: "${aws_security_group.cluster_default.id}"
self: "false"
source_security_group_id: <computed>
to_port: "0"
type: "egress"
+ aws_security_group_rule.default_ingress
id: <computed>
cidr_blocks.#: "1"
cidr_blocks.0: "0.0.0.0/0"
from_port: "0"
protocol: "-1"
security_group_id: "${aws_security_group.cluster_default.id}"
self: "false"
source_security_group_id: <computed>
to_port: "0"
type: "ingress"
+ module.vpc.aws_internet_gateway.this
id: <computed>
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_route.public_internet_gateway
id: <computed>
destination_cidr_block: "0.0.0.0/0"
destination_prefix_list_id: <computed>
egress_only_gateway_id: <computed>
gateway_id: "${aws_internet_gateway.this.id}"
instance_id: <computed>
instance_owner_id: <computed>
nat_gateway_id: <computed>
network_interface_id: <computed>
origin: <computed>
route_table_id: "${aws_route_table.public.id}"
state: <computed>
+ module.vpc.aws_route_table.private[0]
id: <computed>
owner_id: <computed>
propagating_vgws.#: <computed>
route.#: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258-private-us-east-1a"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_route_table.private[1]
id: <computed>
owner_id: <computed>
propagating_vgws.#: <computed>
route.#: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258-private-us-east-1b"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_route_table.private[2]
id: <computed>
owner_id: <computed>
propagating_vgws.#: <computed>
route.#: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258-private-us-east-1c"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_route_table.public
id: <computed>
owner_id: <computed>
propagating_vgws.#: <computed>
route.#: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258-public"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_route_table_association.private[0]
id: <computed>
route_table_id: "${element(aws_route_table.private.*.id, (var.single_nat_gateway ? 0 : count.index))}"
subnet_id: "${element(aws_subnet.private.*.id, count.index)}"
+ module.vpc.aws_route_table_association.private[1]
id: <computed>
route_table_id: "${element(aws_route_table.private.*.id, (var.single_nat_gateway ? 0 : count.index))}"
subnet_id: "${element(aws_subnet.private.*.id, count.index)}"
+ module.vpc.aws_route_table_association.private[2]
id: <computed>
route_table_id: "${element(aws_route_table.private.*.id, (var.single_nat_gateway ? 0 : count.index))}"
subnet_id: "${element(aws_subnet.private.*.id, count.index)}"
+ module.vpc.aws_route_table_association.public[0]
id: <computed>
route_table_id: "${aws_route_table.public.id}"
subnet_id: "${element(aws_subnet.public.*.id, count.index)}"
+ module.vpc.aws_route_table_association.public[1]
id: <computed>
route_table_id: "${aws_route_table.public.id}"
subnet_id: "${element(aws_subnet.public.*.id, count.index)}"
+ module.vpc.aws_route_table_association.public[2]
id: <computed>
route_table_id: "${aws_route_table.public.id}"
subnet_id: "${element(aws_subnet.public.*.id, count.index)}"
+ module.vpc.aws_subnet.private[0]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1a"
availability_zone_id: <computed>
cidr_block: "10.0.1.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "false"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_subnet.private[1]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1b"
availability_zone_id: <computed>
cidr_block: "10.0.2.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "false"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_subnet.private[2]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1c"
availability_zone_id: <computed>
cidr_block: "10.0.3.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "false"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_subnet.public[0]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1a"
availability_zone_id: <computed>
cidr_block: "10.0.101.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "true"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258-worker-foo"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_subnet.public[1]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1b"
availability_zone_id: <computed>
cidr_block: "10.0.102.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "true"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258-worker-foo"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_subnet.public[2]
id: <computed>
arn: <computed>
assign_ipv6_address_on_creation: "false"
availability_zone: "us-east-1c"
availability_zone_id: <computed>
cidr_block: "10.0.103.0/24"
ipv6_cidr_block: <computed>
ipv6_cidr_block_association_id: <computed>
map_public_ip_on_launch: "true"
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "pr-975-258-worker-foo"
tags.Owner: "jenkins"
vpc_id: "${local.vpc_id}"
+ module.vpc.aws_vpc.this
id: <computed>
arn: <computed>
assign_generated_ipv6_cidr_block: "false"
cidr_block: "10.0.0.0/16"
default_network_acl_id: <computed>
default_route_table_id: <computed>
default_security_group_id: <computed>
dhcp_options_id: <computed>
enable_classiclink: <computed>
enable_classiclink_dns_support: <computed>
enable_dns_hostnames: "true"
enable_dns_support: "true"
instance_tenancy: "default"
ipv6_association_id: <computed>
ipv6_cidr_block: <computed>
main_route_table_id: <computed>
owner_id: <computed>
tags.%: "3"
tags.Environment: "dev"
tags.Name: "vpc-pr-975-258"
tags.Owner: "jenkins"
Plan: 25 to add, 0 to change, 0 to destroy.
aws_iam_role.role: Creating...
arn: "" => "<computed>"
assume_role_policy: "" => "{\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": \"sts:AssumeRole\",\n \"Principal\": {\n \"Service\": \"ec2.amazonaws.com\"\n },\n \"Effect\": \"Allow\",\n \"Sid\": \"\"\n }\n ]\n}\n"
create_date: "" => "<computed>"
force_detach_policies: "" => "false"
max_session_duration: "" => "3600"
name: "" => "pr-975-258-role"
path: "" => "/"
unique_id: "" => "<computed>"
module.vpc.aws_vpc.this: Creating...
arn: "" => "<computed>"
assign_generated_ipv6_cidr_block: "" => "false"
cidr_block: "" => "10.0.0.0/16"
default_network_acl_id: "" => "<computed>"
default_route_table_id: "" => "<computed>"
default_security_group_id: "" => "<computed>"
dhcp_options_id: "" => "<computed>"
enable_classiclink: "" => "<computed>"
enable_classiclink_dns_support: "" => "<computed>"
enable_dns_hostnames: "" => "true"
enable_dns_support: "" => "true"
instance_tenancy: "" => "default"
ipv6_association_id: "" => "<computed>"
ipv6_cidr_block: "" => "<computed>"
main_route_table_id: "" => "<computed>"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258"
tags.Owner: "" => "jenkins"
aws_iam_role.role: Creation complete after 0s (ID: pr-975-258-role)
aws_iam_instance_profile.test_profile: Creating...
arn: "" => "<computed>"
create_date: "" => "<computed>"
name: "" => "pr-975-258-worker-profile"
path: "" => "/"
role: "" => "pr-975-258-role"
roles.#: "" => "<computed>"
unique_id: "" => "<computed>"
aws_iam_instance_profile.test_profile: Creation complete after 0s (ID: pr-975-258-worker-profile)
module.vpc.aws_vpc.this: Creation complete after 2s (ID: vpc-01b6178fc04d23888)
aws_security_group.cluster_default: Creating...
arn: "" => "<computed>"
description: "" => "pr-975-258 default security policy"
egress.#: "" => "<computed>"
ingress.#: "" => "<computed>"
name: "" => "pr-975-258-default"
owner_id: "" => "<computed>"
revoke_rules_on_delete: "" => "false"
tags.%: "" => "1"
tags.Name: "" => "pr-975-258-worker-sg"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_route_table.private[2]: Creating...
owner_id: "" => "<computed>"
propagating_vgws.#: "" => "<computed>"
route.#: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258-private-us-east-1c"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.private[0]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1a"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.1.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "false"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.public[0]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1a"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.101.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "true"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258-worker-foo"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_route_table.private[0]: Creating...
owner_id: "" => "<computed>"
propagating_vgws.#: "" => "<computed>"
route.#: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258-private-us-east-1a"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_route_table.private[1]: Creating...
owner_id: "" => "<computed>"
propagating_vgws.#: "" => "<computed>"
route.#: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258-private-us-east-1b"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.public[2]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1c"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.103.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "true"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258-worker-foo"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.private[2]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1c"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.3.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "false"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.public[1]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1b"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.102.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "true"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258-worker-foo"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_subnet.private[1]: Creating...
arn: "" => "<computed>"
assign_ipv6_address_on_creation: "" => "false"
availability_zone: "" => "us-east-1b"
availability_zone_id: "" => "<computed>"
cidr_block: "" => "10.0.2.0/24"
ipv6_cidr_block: "" => "<computed>"
ipv6_cidr_block_association_id: "" => "<computed>"
map_public_ip_on_launch: "" => "false"
owner_id: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "pr-975-258"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_route_table.private[2]: Creation complete after 0s (ID: rtb-0eb95634bb4bdf242)
module.vpc.aws_route_table.private[0]: Creation complete after 0s (ID: rtb-01c859705a5683857)
module.vpc.aws_route_table.public: Creating...
owner_id: "" => "<computed>"
propagating_vgws.#: "" => "<computed>"
route.#: "" => "<computed>"
tags.%: "" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258-public"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_internet_gateway.this: Creating...
owner_id: "" => "<computed>"
tags.%: "0" => "3"
tags.Environment: "" => "dev"
tags.Name: "" => "vpc-pr-975-258"
tags.Owner: "" => "jenkins"
vpc_id: "" => "vpc-01b6178fc04d23888"
module.vpc.aws_route_table.private[1]: Creation complete after 0s (ID: rtb-0cc4d8ad28295d807)
aws_security_group.cluster_default: Creation complete after 0s (ID: sg-03e2fd2a04defa788)
aws_security_group_rule.default_egress: Creating...
cidr_blocks.#: "" => "1"
cidr_blocks.0: "" => "0.0.0.0/0"
from_port: "" => "0"
protocol: "" => "-1"
security_group_id: "" => "sg-03e2fd2a04defa788"
self: "" => "false"
source_security_group_id: "" => "<computed>"
to_port: "" => "0"
type: "" => "egress"
aws_security_group_rule.default_ingress: Creating...
cidr_blocks.#: "" => "1"
cidr_blocks.0: "" => "0.0.0.0/0"
from_port: "" => "0"
protocol: "" => "-1"
security_group_id: "" => "sg-03e2fd2a04defa788"
self: "" => "false"
source_security_group_id: "" => "<computed>"
to_port: "" => "0"
type: "" => "ingress"
module.vpc.aws_route_table.public: Creation complete after 0s (ID: rtb-02d9caddda23f272f)
aws_security_group_rule.allow_all: Creating...
cidr_blocks.#: "" => "1"
cidr_blocks.0: "" => "0.0.0.0/0"
description: "" => "SSH"
from_port: "" => "22"
protocol: "" => "tcp"
security_group_id: "" => "sg-03e2fd2a04defa788"
self: "" => "false"
source_security_group_id: "" => "<computed>"
to_port: "" => "22"
type: "" => "ingress"
module.vpc.aws_subnet.private[1]: Creation complete after 0s (ID: subnet-0adce266c18f72255)
module.vpc.aws_subnet.private[2]: Creation complete after 0s (ID: subnet-078025a95b8289428)
module.vpc.aws_subnet.public[2]: Creation complete after 0s (ID: subnet-0508530a7b553cace)
module.vpc.aws_internet_gateway.this: Creation complete after 0s (ID: igw-0bcc735e9b9964b0d)
module.vpc.aws_subnet.public[0]: Creation complete after 0s (ID: subnet-079ba6af91da7e6e2)
module.vpc.aws_route.public_internet_gateway: Creating...
destination_cidr_block: "" => "0.0.0.0/0"
destination_prefix_list_id: "" => "<computed>"
egress_only_gateway_id: "" => "<computed>"
gateway_id: "" => "igw-0bcc735e9b9964b0d"
instance_id: "" => "<computed>"
instance_owner_id: "" => "<computed>"
nat_gateway_id: "" => "<computed>"
network_interface_id: "" => "<computed>"
origin: "" => "<computed>"
route_table_id: "" => "rtb-02d9caddda23f272f"
state: "" => "<computed>"
module.vpc.aws_subnet.public[1]: Creation complete after 0s (ID: subnet-0df88ffea33bee0de)
module.vpc.aws_subnet.private[0]: Creation complete after 0s (ID: subnet-0527838d60f1601f6)
module.vpc.aws_route_table_association.public[1]: Creating...
route_table_id: "" => "rtb-02d9caddda23f272f"
subnet_id: "" => "subnet-0df88ffea33bee0de"[0m
[0m[1mmodule.vpc.aws_route_table_association.public[0]: Creating...[0m
route_table_id: "" => "rtb-02d9caddda23f272f"
subnet_id: "" => "subnet-079ba6af91da7e6e2"[0m
[0m[1mmodule.vpc.aws_route_table_association.public[2]: Creating...[0m
route_table_id: "" => "rtb-02d9caddda23f272f"
subnet_id: "" => "subnet-0508530a7b553cace"[0m
[0m[1mmodule.vpc.aws_route_table_association.private[2]: Creating...[0m
route_table_id: "" => "rtb-0eb95634bb4bdf242"
subnet_id: "" => "subnet-078025a95b8289428"[0m
[0m[1mmodule.vpc.aws_route_table_association.private[0]: Creating...[0m
route_table_id: "" => "rtb-01c859705a5683857"
subnet_id: "" => "subnet-0527838d60f1601f6"[0m
[0m[1mmodule.vpc.aws_route_table_association.private[1]: Creating...[0m
route_table_id: "" => "rtb-0cc4d8ad28295d807"
subnet_id: "" => "subnet-0adce266c18f72255"[0m
[0m[1maws_security_group_rule.default_egress: Creation complete after 0s (ID: sgrule-712216049)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.public[1]: Creation complete after 0s (ID: rtbassoc-08e10268bb436cde3)[0m[0m
[0m[1mmodule.vpc.aws_route.public_internet_gateway: Creation complete after 0s (ID: r-rtb-02d9caddda23f272f1080289494)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.private[0]: Creation complete after 0s (ID: rtbassoc-072092ddd4cbe1d4e)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.private[2]: Creation complete after 0s (ID: rtbassoc-0a1ec90e46fdec4b8)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.public[2]: Creation complete after 0s (ID: rtbassoc-07032d65a9f137ac4)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.private[1]: Creation complete after 0s (ID: rtbassoc-0c6024cd954c92135)[0m[0m
[0m[1mmodule.vpc.aws_route_table_association.public[0]: Creation complete after 1s (ID: rtbassoc-0c286cad3e587bdd3)[0m[0m
[0m[1maws_security_group_rule.default_ingress: Creation complete after 1s (ID: sgrule-1217565267)[0m[0m
[0m[1maws_security_group_rule.allow_all: Creation complete after 1s (ID: sgrule-2442055330)[0m[0m
[0m[1m[32m
Apply complete! Resources: 25 added, 0 changed, 0 destroyed.[0m
[0m[1m[32m
Outputs:
vpc_id = vpc-01b6178fc04d23888[0m
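Not part of the job script: the VPC the apply just created can be inspected out-of-band with the aws CLI, assuming credentials for the same account and the us-east-1 region this job uses.

aws ec2 describe-vpcs --region us-east-1 --vpc-ids vpc-01b6178fc04d23888 \
  --query 'Vpcs[0].{State:State,Cidr:CidrBlock,Tags:Tags}'
aws ec2 describe-subnets --region us-east-1 --filters Name=vpc-id,Values=vpc-01b6178fc04d23888 \
  --query 'length(Subnets)'   # the apply above created 3 public and 3 private subnets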
/data/src/github.com/openshift/cluster-api-provider-aws
+ popd
+ sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: CREATE CLUSTER RESOURCES [00h 00m 31s] ##########
[workspace] $ /bin/bash /tmp/jenkins6718608391732824932.sh
########## STARTING STAGE: RUN E2E TESTS ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.aVWCHYXuLX
+ cat
+ chmod +x /tmp/tmp.aVWCHYXuLX
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.aVWCHYXuLX openshiftdevel:/tmp/tmp.aVWCHYXuLX
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.aVWCHYXuLX"'
+ cd /home/origin
+ set +x
+ chmod 0600 /home/origin/.ssh/devenv.pem
+ export CLUSTER_ID=pr-975-258
+ CLUSTER_ID=pr-975-258
+ export ENVIRONMENT_ID=pr-975-258
+ ENVIRONMENT_ID=pr-975-258
+ export SSH_PK=/home/origin/.ssh/devenv.pem
+ SSH_PK=/home/origin/.ssh/devenv.pem
++ sudo kubectl get service kubernetes -o json
++ jq .spec.clusterIP --raw-output
+ export CLUSTER_IP=10.96.0.1
+ CLUSTER_IP=10.96.0.1
+ export KUBECONFIG=/etc/kubernetes/admin.conf
+ KUBECONFIG=/etc/kubernetes/admin.conf
+ sudo -E kubectl config set-cluster kubernetes --server=https://10.96.0.1:443
Cluster "kubernetes" set.
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go
++ git describe --always --abbrev=7
+ sudo -E make k8s-e2e ACTUATOR_IMAGE=origin-aws-machine-controllers:v0.2.0-173-gb8c169e
# KUBECONFIG and SSH_PK dirs need to be mounted inside a container if tests are run in containers
go test -timeout 30m \
-v sigs.k8s.io/cluster-api-provider-aws/test/machines \
-kubeconfig ${KUBECONFIG:-~/.kube/config} \
-ssh-key ${SSH_PK:-~/.ssh/id_rsa} \
-machine-controller-image ${ACTUATOR_IMAGE:-gcr.io/k8s-cluster-api/aws-machine-controller:0.0.1} \
-machine-manager-image ${ACTUATOR_IMAGE:-gcr.io/k8s-cluster-api/aws-machine-controller:0.0.1} \
-nodelink-controller-image $(docker run registry.svc.ci.openshift.org/origin/release:4.2 image machine-api-operator) \
-cluster-id ${ENVIRONMENT_ID:-""} \
-ginkgo.v \
-args -v 5 -logtostderr true
Unable to find image 'registry.svc.ci.openshift.org/origin/release:4.2' locally
Trying to pull repository registry.svc.ci.openshift.org/origin/release ...
4.2: Pulling from registry.svc.ci.openshift.org/origin/release
c8d67acdb2ff: Pulling fs layer
79d11c1a86c4: Pulling fs layer
ffd263ce0b34: Pulling fs layer
73aafda5b164: Pulling fs layer
471e39299fe4: Pulling fs layer
d2ad566fdcfd: Pulling fs layer
73aafda5b164: Waiting
471e39299fe4: Waiting
d2ad566fdcfd: Waiting
79d11c1a86c4: Verifying Checksum
79d11c1a86c4: Download complete
ffd263ce0b34: Verifying Checksum
ffd263ce0b34: Download complete
73aafda5b164: Verifying Checksum
73aafda5b164: Download complete
471e39299fe4: Verifying Checksum
471e39299fe4: Download complete
d2ad566fdcfd: Verifying Checksum
d2ad566fdcfd: Download complete
c8d67acdb2ff: Verifying Checksum
c8d67acdb2ff: Download complete
c8d67acdb2ff: Pull complete
79d11c1a86c4: Pull complete
ffd263ce0b34: Pull complete
73aafda5b164: Pull complete
471e39299fe4: Pull complete
d2ad566fdcfd: Pull complete
Digest: sha256:f733a35f525a020a690d84c83afc7a7a0c91c29f0128b72847407fa647ed5de7
Status: Downloaded newer image for registry.svc.ci.openshift.org/origin/release:4.2
=== RUN TestCart
Running Suite: Machine Suite
============================
Random Seed: 1567607418
Will run 3 of 3 specs
[sigs.k8s.io] Machines AWS actuator
Can create AWS instances
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:131
STEP: Creating "namespace-84a81477-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
STEP: Deploying cluster API stack components
STEP: Deploying cluster CRD manifest
I0904 14:30:27.329534 19288 framework.go:335] create.err: <nil>
I0904 14:30:27.335838 19288 framework.go:344] get.err: <nil>
STEP: Deploying machine CRD manifest
I0904 14:30:32.341851 19288 framework.go:335] create.err: <nil>
I0904 14:30:32.346661 19288 framework.go:344] get.err: <nil>
STEP: Deploying machineset CRD manifest
I0904 14:30:37.350940 19288 framework.go:335] create.err: <nil>
I0904 14:30:37.354920 19288 framework.go:344] get.err: <nil>
STEP: Deploying machinedeployment CRD manifest
I0904 14:30:42.360183 19288 framework.go:335] create.err: <nil>
I0904 14:30:42.366127 19288 framework.go:344] get.err: <nil>
STEP: Deploying cluster role
STEP: Deploying machine API controllers
STEP: Waiting until cluster objects can be listed
STEP: Cluster API stack deployed
STEP: Creating "pr-975-258" cluster
STEP: Creating "pr-975-258-machine-9ef1e7" machine
STEP: Waiting for "pr-975-258-machine-9ef1e7" machine
STEP: Verify machine's underlying instance is running
I0904 14:31:17.418500 19288 machines.go:80] Waiting for instance to come up
I0904 14:31:22.418526 19288 machines.go:80] Waiting for instance to come up
I0904 14:31:27.418514 19288 machines.go:80] Waiting for instance to come up
I0904 14:31:27.543913 19288 machines.go:88] Machine is running
STEP: Checking subnet
STEP: Checking availability zone
STEP: Checking security groups
STEP: Checking IAM role
STEP: Checking tags
STEP: Checking machine status
STEP: Deleting "pr-975-258-machine-9ef1e7" machine
STEP: Verify instance is terminated
I0904 14:31:37.980673 19288 machines.go:50] Waiting for instance to be terminated
STEP: Deleting "pr-975-258-machine-9ef1e7" machine
STEP: Verify instance is terminated
I0904 14:31:48.085993 19288 machines.go:50] Waiting for instance to be terminated
STEP: Deleting machine API controllers
I0904 14:31:53.193621 19288 framework.go:311] del.err: <nil>
I0904 14:31:53.195253 19288 framework.go:323] get.err: deployments.apps "clusterapi-controllers" not found
STEP: Deleting cluster role
I0904 14:31:58.201667 19288 framework.go:311] del.err: <nil>
I0904 14:31:58.204075 19288 framework.go:323] get.err: clusterrolebindings.rbac.authorization.k8s.io "manager-rolebinding" not found
I0904 14:32:03.208472 19288 framework.go:311] del.err: <nil>
I0904 14:32:03.210679 19288 framework.go:323] get.err: clusterroles.rbac.authorization.k8s.io "manager-role" not found
time="2019-09-04T14:32:03Z" level=info msg="namespace-84a81477-cf20-11e9-b8bb-0a17b8b8d0c8: &v1.Namespace{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"namespace-84a81477-cf20-11e9-b8bb-0a17b8b8d0c8\", GenerateName:\"\", Namespace:\"\", SelfLink:\"\", UID:\"\", ResourceVersion:\"\", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:\"\", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.NamespaceSpec{Finalizers:[]v1.FinalizerName(nil)}, Status:v1.NamespaceStatus{Phase:\"\"}}" source="machines_test.go:73"
STEP: Destroying "namespace-84a81477-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
• [SLOW TEST:104.916 seconds]
[sigs.k8s.io] Machines
/data/src/sigs.k8s.io/cluster-api-provider-aws/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go:373
AWS actuator
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:87
Can create AWS instances
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:131
------------------------------
[sigs.k8s.io] Machines AWS actuator
Can create EBS volumes
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:209
STEP: Creating "namespace-c3304f9a-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
STEP: Deploying cluster API stack components
STEP: Deploying cluster CRD manifest
I0904 14:32:08.221621 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "clusters.cluster.k8s.io" already exists
STEP: Deploying machine CRD manifest
I0904 14:32:13.225638 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machines.machine.openshift.io" already exists
STEP: Deploying machineset CRD manifest
I0904 14:32:18.230712 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machinesets.machine.openshift.io" already exists
STEP: Deploying machinedeployment CRD manifest
I0904 14:32:23.235793 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machinedeployments.machine.openshift.io" already exists
STEP: Deploying cluster role
STEP: Deploying machine API controllers
STEP: Waiting until cluster objects can be listed
STEP: Cluster API stack deployed
STEP: Creating "pr-975-258" cluster
STEP: Creating "pr-975-258-machine-db0ee0" machine
STEP: Waiting for "pr-975-258-machine-db0ee0" machine
STEP: Verify machine's underlying instance is running
I0904 14:32:58.267086 19288 machines.go:80] Waiting for instance to come up
I0904 14:33:03.267100 19288 machines.go:80] Waiting for instance to come up
I0904 14:33:08.267088 19288 machines.go:80] Waiting for instance to come up
I0904 14:33:08.381360 19288 machines.go:88] Machine is running
STEP: Checking EBS volume mount
STEP: Checking EBS volume size
STEP: Checking EBS volume type
STEP: Checking only root volume gets modified
STEP: Deleting "pr-975-258-machine-db0ee0" machine
STEP: Verify instance is terminated
I0904 14:33:18.680913 19288 machines.go:50] Waiting for instance to be terminated
STEP: Deleting machine API controllers
I0904 14:33:23.736154 19288 framework.go:311] del.err: <nil>
I0904 14:33:23.738385 19288 framework.go:323] get.err: deployments.apps "clusterapi-controllers" not found
STEP: Deleting cluster role
I0904 14:33:28.742971 19288 framework.go:311] del.err: <nil>
I0904 14:33:28.744421 19288 framework.go:323] get.err: clusterrolebindings.rbac.authorization.k8s.io "manager-rolebinding" not found
I0904 14:33:33.748679 19288 framework.go:311] del.err: <nil>
I0904 14:33:33.750726 19288 framework.go:323] get.err: clusterroles.rbac.authorization.k8s.io "manager-role" not found
time="2019-09-04T14:33:33Z" level=info msg="namespace-c3304f9a-cf20-11e9-b8bb-0a17b8b8d0c8: &v1.Namespace{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"namespace-c3304f9a-cf20-11e9-b8bb-0a17b8b8d0c8\", GenerateName:\"\", Namespace:\"\", SelfLink:\"\", UID:\"\", ResourceVersion:\"\", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:\"\", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.NamespaceSpec{Finalizers:[]v1.FinalizerName(nil)}, Status:v1.NamespaceStatus{Phase:\"\"}}" source="machines_test.go:73"
STEP: Destroying "namespace-c3304f9a-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
• [SLOW TEST:90.540 seconds]
[sigs.k8s.io] Machines
/data/src/sigs.k8s.io/cluster-api-provider-aws/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go:373
AWS actuator
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:87
Can create EBS volumes
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:209
------------------------------
[sigs.k8s.io] Machines AWS actuator
Can deploy compute nodes through machineset
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:245
STEP: Creating "namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
STEP: Deploying cluster API stack components
STEP: Deploying cluster CRD manifest
I0904 14:33:38.761065 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "clusters.cluster.k8s.io" already exists
STEP: Deploying machine CRD manifest
I0904 14:33:43.767532 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machines.machine.openshift.io" already exists
STEP: Deploying machineset CRD manifest
I0904 14:33:48.772600 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machinesets.machine.openshift.io" already exists
STEP: Deploying machinedeployment CRD manifest
I0904 14:33:53.777074 19288 framework.go:335] create.err: customresourcedefinitions.apiextensions.k8s.io "machinedeployments.machine.openshift.io" already exists
STEP: Deploying cluster role
STEP: Deploying machine API controllers
STEP: Waiting until cluster objects can be listed
STEP: Cluster API stack deployed
STEP: Creating "pr-975-258" cluster
STEP: Creating "pr-975-258-master-machine-14021e" machine
STEP: Waiting for "pr-975-258-master-machine-14021e" machine
STEP: Verify machine's underlying instance is running
I0904 14:34:33.812904 19288 machines.go:80] Waiting for instance to come up
I0904 14:34:38.812810 19288 machines.go:80] Waiting for instance to come up
I0904 14:34:43.812907 19288 machines.go:80] Waiting for instance to come up
I0904 14:34:43.921258 19288 machines.go:88] Machine is running
STEP: Collecting master kubeconfig
I0904 14:34:53.988805 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:35:58.196795 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:35:58.988828 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:35:59.274100 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:03.988833 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:04.362786 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:08.988839 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:09.298005 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:13.988829 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:14.490539 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:18.988835 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:19.330856 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:23.988838 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:24.270604 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:28.988824 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:29.267045 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:33.988833 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:34.265165 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:38.988835 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:39.274587 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:43.988828 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:44.268054 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:48.988831 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:49.262034 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:53.988831 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:54.264279 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:36:58.988826 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:36:59.271960 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:03.988827 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:04.265329 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:08.988832 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:09.271657 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:13.988832 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:14.277223 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:18.988824 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:19.289286 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:23.988829 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:24.267509 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:28.988833 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:29.292202 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:33.988824 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:34.275699 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:38.988816 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:39.300533 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:43.988834 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:44.304042 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:48.988823 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:49.293622 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:53.988835 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:54.299392 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:37:58.988823 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:37:59.301154 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:38:03.988825 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:04.266026 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:38:08.988826 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:09.325494 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:38:13.988816 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:14.295616 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:38:18.988830 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:19.271894 19288 machines.go:324] Unable to pull kubeconfig: failed to collect kubeconfig: Process exited with status 1, cat: /root/.kube/config: No such file or directory
I0904 14:38:23.988821 19288 machines.go:316] Pulling kubeconfig from ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:24.308095 19288 machines.go:334] Master running on https://ec2-35-174-241-123.compute-1.amazonaws.com:8443
I0904 14:38:24.447686 19288 framework.go:377] Uploading "origin-aws-machine-controllers:v0.2.0-173-gb8c169e" to the master machine under "ec2-35-174-241-123.compute-1.amazonaws.com"
I0904 14:39:16.695938 19288 framework.go:391] Warning: Permanently added 'ec2-35-174-241-123.compute-1.amazonaws.com,35.174.241.123' (ECDSA) to the list of known hosts.
Loaded image: origin-aws-machine-controllers:v0.2.0-173-gb8c169e
STEP: Creating "namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
STEP: Deploying cluster API stack components
STEP: Deploying cluster CRD manifest
I0904 14:39:21.724900 19288 framework.go:335] create.err: <nil>
I0904 14:39:21.728080 19288 framework.go:344] get.err: <nil>
STEP: Deploying machine CRD manifest
I0904 14:39:26.733147 19288 framework.go:335] create.err: <nil>
I0904 14:39:26.737335 19288 framework.go:344] get.err: <nil>
STEP: Deploying machineset CRD manifest
I0904 14:39:31.741725 19288 framework.go:335] create.err: <nil>
I0904 14:39:33.060902 19288 framework.go:344] get.err: <nil>
STEP: Deploying machinedeployment CRD manifest
I0904 14:39:38.065915 19288 framework.go:335] create.err: <nil>
I0904 14:39:38.068614 19288 framework.go:344] get.err: <nil>
STEP: Deploying cluster role
STEP: Deploying machine API controllers
STEP: Waiting until cluster objects can be listed
STEP: Cluster API stack deployed
STEP: Deploy worker nodes through machineset
STEP: Creating "pr-975-258" cluster
workerMachineSet: &v1beta1.MachineSet{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pr-975-258-worker-machineset-e15efa", GenerateName:"pr-975-258-worker-machine-e15efa-", Namespace:"namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"machine.openshift.io/cluster-api-cluster":"pr-975-258"}, Annotations:map[string]string{"machine.openshift.io/exclude-node-draining":""}, OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1beta1.MachineSetSpec{Replicas:(*int32)(0xc4205b88b0), MinReadySeconds:0, DeletePolicy:"", Selector:v1.LabelSelector{MatchLabels:map[string]string{"machine.openshift.io/cluster-api-cluster":"pr-975-258", "machine.openshift.io/cluster-api-machineset":"pr-975-258-worker-machineset-e15efa"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}, Template:v1beta1.MachineTemplateSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"pr-975-258-worker-machine-e15efa-", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"machine.openshift.io/cluster-api-machineset":"pr-975-258-worker-machineset-e15efa", "machine.openshift.io/cluster-api-cluster":"pr-975-258"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1beta1.MachineSpec{ObjectMeta:v1.ObjectMeta{Name:"", GenerateName:"", Namespace:"", SelfLink:"", UID:"", ResourceVersion:"", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"node-role.kubernetes.io/compute":""}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Taints:[]v1.Taint(nil), ProviderSpec:v1beta1.ProviderSpec{Value:(*runtime.RawExtension)(0xc42064e810)}, ProviderID:(*string)(nil)}}}, Status:v1beta1.MachineSetStatus{Replicas:0, FullyLabeledReplicas:0, ReadyReplicas:0, AvailableReplicas:0, ObservedGeneration:0, ErrorReason:(*common.MachineSetStatusError)(nil), ErrorMessage:(*string)(nil)}}
STEP: Creating "pr-975-258-worker-machineset-e15efa" machineset
STEP: Verify machineset's underlying instances are running
STEP: Waiting for "pr-975-258-worker-machineset-e15efa-b84cz" machine
STEP: Verify machine's underlying instance is running
I0904 14:40:23.365227 19288 machines.go:80] Waiting for instance to come up
I0904 14:40:28.365284 19288 machines.go:80] Waiting for instance to come up
I0904 14:40:28.508734 19288 machines.go:88] Machine is running
STEP: Checking master and worker nodes are ready
I0904 14:40:33.513113 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:40:38.514769 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:40:43.511611 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:40:48.512142 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:40:53.513058 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:40:58.512039 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:03.511674 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:08.511982 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:13.511652 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:18.511575 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:23.511877 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:28.512082 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:33.511994 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:38.511929 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:43.512141 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:48.511984 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:53.511956 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:41:58.512149 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:03.511837 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:08.511926 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:13.511779 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:18.511693 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:23.511873 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:28.512080 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:33.511924 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:38.511961 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:43.512021 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:48.511990 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:53.511794 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:42:58.512078 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:43:03.511576 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:43:08.512157 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:43:13.511599 19288 machines.go:226] Waiting for 2 nodes to come up, have 1
I0904 14:43:18.512057 19288 machines.go:239] Node "ip-10-0-101-140.ec2.internal" is ready
I0904 14:43:18.512086 19288 machines.go:236] Node "ip-10-0-101-214.ec2.internal" not ready
I0904 14:43:23.512552 19288 machines.go:239] Node "ip-10-0-101-140.ec2.internal" is ready
I0904 14:43:23.512578 19288 machines.go:236] Node "ip-10-0-101-214.ec2.internal" not ready
I0904 14:43:28.512295 19288 machines.go:239] Node "ip-10-0-101-140.ec2.internal" is ready
I0904 14:43:28.512327 19288 machines.go:239] Node "ip-10-0-101-214.ec2.internal" is ready
STEP: Checking compute node role and node linking
time="2019-09-04T14:43:33Z" level=info msg="node \"ip-10-0-101-214.ec2.internal\" role set to 'node-role.kubernetes.io/compute'" source="machines_test.go:374"
time="2019-09-04T14:43:33Z" level=info msg="node \"ip-10-0-101-214.ec2.internal\" is linked with \"pr-975-258-worker-machineset-e15efa-b84cz\" machine" source="machines_test.go:385"
STEP: Destroying worker machines
STEP: Get all "pr-975-258-worker-machineset-e15efa" machineset's machines
STEP: Deleting "pr-975-258-worker-machineset-e15efa" machineset
STEP: Waiting for all machines to be deleted
STEP: Verify machine's underlying instance is not running
I0904 14:43:38.527062 19288 machines.go:102] Waiting for instance to terminate
STEP: Waiting for "pr-975-258-worker-machineset-e15efa-b84cz" machine object to be deleted
I0904 14:53:38.675509 19288 machines.go:130] unable to wait for machine to get deleted: timed out waiting for the condition
STEP: Destroying master machine
STEP: Deleting "pr-975-258-master-machine-14021e" machine
STEP: Verify instance is terminated
I0904 14:53:53.685916 19288 machines.go:50] Waiting for instance to be terminated
STEP: Get all "pr-975-258-worker-machineset-e15efa" machineset's machines
STEP: Deleting "pr-975-258-master-machine-14021e" machine
STEP: Verify instance is terminated
I0904 14:54:33.800196 19288 machines.go:50] Waiting for instance to be terminated
STEP: Deleting machine API controllers
I0904 14:54:38.892675 19288 framework.go:311] del.err: <nil>
I0904 14:54:38.897795 19288 framework.go:323] get.err: deployments.apps "clusterapi-controllers" not found
STEP: Deleting cluster role
I0904 14:54:43.901900 19288 framework.go:311] del.err: <nil>
I0904 14:54:43.903239 19288 framework.go:323] get.err: clusterrolebindings.rbac.authorization.k8s.io "manager-rolebinding" not found
I0904 14:54:48.907315 19288 framework.go:311] del.err: <nil>
I0904 14:54:48.908805 19288 framework.go:323] get.err: clusterroles.rbac.authorization.k8s.io "manager-role" not found
time="2019-09-04T14:54:48Z" level=info msg="namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8: &v1.Namespace{TypeMeta:v1.TypeMeta{Kind:\"\", APIVersion:\"\"}, ObjectMeta:v1.ObjectMeta{Name:\"namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8\", GenerateName:\"\", Namespace:\"\", SelfLink:\"\", UID:\"\", ResourceVersion:\"\", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Initializers:(*v1.Initializers)(nil), Finalizers:[]string(nil), ClusterName:\"\", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.NamespaceSpec{Finalizers:[]v1.FinalizerName(nil)}, Status:v1.NamespaceStatus{Phase:\"\"}}" source="machines_test.go:73"
STEP: Destroying "namespace-f927ab3c-cf20-11e9-b8bb-0a17b8b8d0c8" namespace
• [SLOW TEST:1275.158 seconds]
[sigs.k8s.io] Machines
/data/src/sigs.k8s.io/cluster-api-provider-aws/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go:373
AWS actuator
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:87
Can deploy compute nodes through machineset
/data/src/sigs.k8s.io/cluster-api-provider-aws/test/machines/machines_test.go:245
------------------------------
Ran 3 of 3 Specs in 1470.614 seconds
SUCCESS! -- 3 Passed | 0 Failed | 0 Pending | 0 Skipped
--- PASS: TestCart (1470.61s)
PASS
ok sigs.k8s.io/cluster-api-provider-aws/test/machines 1470.659s
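The suite above is plain go test driving ginkgo, so a single spec can be re-run by hand with -ginkgo.focus. A sketch reusing the flags from the make recipe above (the nodelink-controller image flag is simply left at its default here):

go test -timeout 30m -v sigs.k8s.io/cluster-api-provider-aws/test/machines \
	-kubeconfig ${KUBECONFIG:-~/.kube/config} \
	-ssh-key ${SSH_PK:-~/.ssh/id_rsa} \
	-machine-controller-image ${ACTUATOR_IMAGE:-gcr.io/k8s-cluster-api/aws-machine-controller:0.0.1} \
	-machine-manager-image ${ACTUATOR_IMAGE:-gcr.io/k8s-cluster-api/aws-machine-controller:0.0.1} \
	-cluster-id ${ENVIRONMENT_ID:-""} \
	-ginkgo.v -ginkgo.focus "Can create EBS volumes" \
	-args -v 5 -logtostderr true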
The aws instance was running and terminated
+ echo 'The aws instance was running and terminated'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 25m 22s] ##########
[PostBuildScript] - Executing post build scripts.
[workspace] $ /bin/bash /tmp/jenkins8102904811257147110.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/gathered
+ tree /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/gathered
/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/gathered
0 directories, 0 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins4343081540019051232.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/generated
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
WARNING: You're not using the default seccomp profile
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/generated
/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
└── pid1.journal
0 directories, 8 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins2799588782348008507.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/journals
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/journals
/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service
0 directories, 3 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins1064271138803010849.sh
########## STARTING STAGE: DEPROVISION RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.TDo2nlUM3V
+ cat
+ chmod +x /tmp/tmp.TDo2nlUM3V
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.TDo2nlUM3V openshiftdevel:/tmp/tmp.TDo2nlUM3V
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.TDo2nlUM3V"'
+ cd /home/origin
+ cd /data/src/github.com/openshift/cluster-api-provider-aws/hack
+ [[ -e envs ]]
+ set +x
+ AWS_DEFAULT_REGION=us-east-1
+ ./aws-provision.sh destroy
/data/src/github.com/openshift/cluster-api-provider-aws/hack/prebuild /data/src/github.com/openshift/cluster-api-provider-aws/hack
{
"TerminatingInstances": [
{
"InstanceId": "i-08a574885b8679b39",
"CurrentState": {
"Code": 32,
"Name": "shutting-down"
},
"PreviousState": {
"Code": 16,
"Name": "running"
}
}
]
}
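The JSON above is an ec2 terminate-instances response (EC2 state code 16 is "running", 32 is "shutting-down"); if a caller needs to block until the instance is fully gone, the aws CLI ships a waiter for that. Sketch only, reusing the instance ID from the response:

aws ec2 wait instance-terminated --region us-east-1 --instance-ids i-08a574885b8679b39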
data.aws_region.current: Refreshing state...
aws_iam_role.role: Refreshing state... (ID: pr-975-258-role)
data.aws_availability_zones.azs: Refreshing state...
aws_vpc.this: Refreshing state... (ID: vpc-01b6178fc04d23888)
aws_iam_instance_profile.test_profile: Refreshing state... (ID: pr-975-258-worker-profile)
aws_security_group.cluster_default: Refreshing state... (ID: sg-03e2fd2a04defa788)
aws_route_table.public: Refreshing state... (ID: rtb-02d9caddda23f272f)
aws_route_table.private[1]: Refreshing state... (ID: rtb-0cc4d8ad28295d807)
aws_route_table.private[2]: Refreshing state... (ID: rtb-0eb95634bb4bdf242)
aws_route_table.private[0]: Refreshing state... (ID: rtb-01c859705a5683857)
aws_subnet.public[1]: Refreshing state... (ID: subnet-0df88ffea33bee0de)
aws_subnet.public[0]: Refreshing state... (ID: subnet-079ba6af91da7e6e2)
aws_subnet.private[1]: Refreshing state... (ID: subnet-0adce266c18f72255)
aws_subnet.public[2]: Refreshing state... (ID: subnet-0508530a7b553cace)
aws_subnet.private[0]: Refreshing state... (ID: subnet-0527838d60f1601f6)
aws_subnet.private[2]: Refreshing state... (ID: subnet-078025a95b8289428)
aws_internet_gateway.this: Refreshing state... (ID: igw-0bcc735e9b9964b0d)
aws_security_group_rule.default_ingress: Refreshing state... (ID: sgrule-1217565267)
aws_security_group_rule.default_egress: Refreshing state... (ID: sgrule-712216049)
aws_security_group_rule.allow_all: Refreshing state... (ID: sgrule-2442055330)
aws_route_table_association.public[1]: Refreshing state... (ID: rtbassoc-08e10268bb436cde3)
aws_route_table_association.public[0]: Refreshing state... (ID: rtbassoc-0c286cad3e587bdd3)
aws_route_table_association.public[2]: Refreshing state... (ID: rtbassoc-07032d65a9f137ac4)
aws_route.public_internet_gateway: Refreshing state... (ID: r-rtb-02d9caddda23f272f1080289494)
aws_route_table_association.private[1]: Refreshing state... (ID: rtbassoc-0c6024cd954c92135)
aws_route_table_association.private[0]: Refreshing state... (ID: rtbassoc-072092ddd4cbe1d4e)
aws_route_table_association.private[2]: Refreshing state... (ID: rtbassoc-0a1ec90e46fdec4b8)
aws_security_group_rule.default_egress: Destroying... (ID: sgrule-712216049)
module.vpc.aws_route_table_association.public[2]: Destroying... (ID: rtbassoc-07032d65a9f137ac4)
aws_iam_instance_profile.test_profile: Destroying... (ID: pr-975-258-worker-profile)
module.vpc.aws_route_table_association.private[1]: Destroying... (ID: rtbassoc-0c6024cd954c92135)
module.vpc.aws_route_table_association.public[1]: Destroying... (ID: rtbassoc-08e10268bb436cde3)
module.vpc.aws_route.public_internet_gateway: Destroying... (ID: r-rtb-02d9caddda23f272f1080289494)
module.vpc.aws_route_table_association.private[2]: Destroying... (ID: rtbassoc-0a1ec90e46fdec4b8)
aws_security_group_rule.allow_all: Destroying... (ID: sgrule-2442055330)
module.vpc.aws_route_table_association.public[0]: Destroying... (ID: rtbassoc-0c286cad3e587bdd3)
aws_security_group_rule.default_ingress: Destroying... (ID: sgrule-1217565267)
module.vpc.aws_route_table_association.public[0]: Destruction complete after 1s
module.vpc.aws_route_table_association.private[1]: Destruction complete after 1s
aws_iam_instance_profile.test_profile: Destruction complete after 1s
module.vpc.aws_route_table_association.private[0]: Destroying... (ID: rtbassoc-072092ddd4cbe1d4e)
module.vpc.aws_route_table_association.public[2]: Destruction complete after 1s
module.vpc.aws_route_table_association.private[2]: Destruction complete after 1s
aws_iam_role.role: Destroying... (ID: pr-975-258-role)
aws_security_group_rule.default_egress: Destruction complete after 1s
module.vpc.aws_route_table_association.public[1]: Destruction complete after 1s
module.vpc.aws_route.public_internet_gateway: Destruction complete after 1s
module.vpc.aws_subnet.public[1]: Destroying... (ID: subnet-0df88ffea33bee0de)
module.vpc.aws_subnet.public[2]: Destroying... (ID: subnet-0508530a7b553cace)
module.vpc.aws_subnet.public[0]: Destroying... (ID: subnet-079ba6af91da7e6e2)
module.vpc.aws_route_table.public: Destroying... (ID: rtb-02d9caddda23f272f)
module.vpc.aws_internet_gateway.this: Destroying... (ID: igw-0bcc735e9b9964b0d)
module.vpc.aws_route_table_association.private[0]: Destruction complete after 0s
module.vpc.aws_route_table.private[2]: Destroying... (ID: rtb-0eb95634bb4bdf242)
module.vpc.aws_subnet.private[1]: Destroying... (ID: subnet-0adce266c18f72255)
aws_iam_role.role: Destruction complete after 0s
module.vpc.aws_route_table.private[0]: Destroying... (ID: rtb-01c859705a5683857)
aws_security_group_rule.allow_all: Destruction complete after 1s
module.vpc.aws_subnet.private[2]: Destroying... (ID: subnet-078025a95b8289428)
module.vpc.aws_route_table.public: Destruction complete after 0s
module.vpc.aws_route_table.private[1]: Destroying... (ID: rtb-0cc4d8ad28295d807)
module.vpc.aws_route_table.private[2]: Destruction complete after 0s
module.vpc.aws_subnet.private[0]: Destroying... (ID: subnet-0527838d60f1601f6)
aws_security_group_rule.default_ingress: Destruction complete after 1s
aws_security_group.cluster_default: Destroying... (ID: sg-03e2fd2a04defa788)
module.vpc.aws_route_table.private[0]: Destruction complete after 0s
module.vpc.aws_route_table.private[1]: Destruction complete after 0s
module.vpc.aws_subnet.public[1]: Destruction complete after 0s
module.vpc.aws_subnet.private[1]: Destruction complete after 0s
module.vpc.aws_subnet.public[2]: Destruction complete after 0s
module.vpc.aws_subnet.private[2]: Destruction complete after 0s
module.vpc.aws_subnet.private[0]: Destruction complete after 1s
module.vpc.aws_subnet.public.0: Still destroying... (ID: subnet-079ba6af91da7e6e2, 10s elapsed)
module.vpc.aws_internet_gateway.this: Still destroying... (ID: igw-0bcc735e9b9964b0d, 10s elapsed)
aws_security_group.cluster_default: Still destroying... (ID: sg-03e2fd2a04defa788, 10s elapsed)
module.vpc.aws_internet_gateway.this: Destruction complete after 17s
module.vpc.aws_subnet.public.0: Still destroying... (ID: subnet-079ba6af91da7e6e2, 20s elapsed)
aws_security_group.cluster_default: Still destroying... (ID: sg-03e2fd2a04defa788, 20s elapsed)
module.vpc.aws_subnet.public[0]: Destruction complete after 26s
aws_security_group.cluster_default: Destruction complete after 26s
module.vpc.aws_vpc.this: Destroying... (ID: vpc-01b6178fc04d23888)
module.vpc.aws_vpc.this: Destruction complete after 1s

Destroy complete! Resources: 25 destroyed.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION RESOURCES [00h 00m 33s] ##########
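Not part of the job script: with "Destroy complete! Resources: 25 destroyed." reported, a quick leak check is to look the VPC up again; an empty result (or an InvalidVpcID.NotFound error) means the teardown really finished. Sketch only, assuming the same account and region.

aws ec2 describe-vpcs --region us-east-1 --vpc-ids vpc-01b6178fc04d23888 --query 'Vpcs[].VpcId' \
  || echo "VPC already gone"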
[workspace] $ /bin/bash /tmp/jenkins3086990846189927780.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/ci-kubernetes-aws-actuator/975/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/builds/975/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
++ pwd
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config -r /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/gcs openshiftdevel:/data
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ exit 0
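Note the combination of trap 'exit 0' EXIT near the top of this stage and the trailing exit 0: artifact assembly is best-effort, so a failure while gathering logs must not change the build's overall result. A standalone sketch of the same idiom (the set -euo pipefail line is an assumption added for illustration and is not visible in this log) is:

#!/bin/bash
set -euo pipefail        # abort the script on the first failing command...
trap 'exit 0' EXIT       # ...but the EXIT trap still reports success to the caller
mkdir -p gcs/artifacts/generated
cp artifacts/generated/*.log gcs/artifacts/generated/   # a failure here aborts the script,
exit 0                                                   # yet the process still exits with status 0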
[workspace] $ /bin/bash /tmp/jenkins8003300154911593544.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ mktemp
+ script=/tmp/tmp.6t12DFh6xl
+ cat
+ chmod +x /tmp/tmp.6t12DFh6xl
+ scp -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.6t12DFh6xl openshiftdevel:/tmp/tmp.6t12DFh6xl
+ ssh -F /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.6t12DFh6xl"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"ci-kubernetes-aws-actuator","buildid":"1169254337591906304","prowjobid":"54633cdd-cf1f-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-api-provider-aws","repo_link":"https://github.com/openshift/cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","base_link":"https://github.com/openshift/cluster-api-provider-aws/commit/d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"ingvagabund","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f","link":"https://github.com/openshift/cluster-api-provider-aws/pull/258","commit_link":"https://github.com/openshift/cluster-api-provider-aws/pull/258/commits/6fc8f0a260389bc1798d5bc6fea49264a324669f","author_link":"https://github.com/ingvagabund"}]}} ]]
++ jq --compact-output '.buildid |= "975"'
+ JOB_SPEC='{"type":"presubmit","job":"ci-kubernetes-aws-actuator","buildid":"975","prowjobid":"54633cdd-cf1f-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-api-provider-aws","repo_link":"https://github.com/openshift/cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","base_link":"https://github.com/openshift/cluster-api-provider-aws/commit/d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"ingvagabund","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f","link":"https://github.com/openshift/cluster-api-provider-aws/pull/258","commit_link":"https://github.com/openshift/cluster-api-provider-aws/pull/258/commits/6fc8f0a260389bc1798d5bc6fea49264a324669f","author_link":"https://github.com/ingvagabund"}]}}'
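The jq filter '.buildid |= "975"' is an update assignment: it replaces only the buildid field of the Prow job spec (originally the Prow build ID 1169254337591906304) with the Jenkins build number 975 and leaves the rest of the JSON untouched. A self-contained example of the same filter on a toy document:

# |= rewrites the addressed field in place; everything else passes through unchanged.
echo '{"job":"ci-kubernetes-aws-actuator","buildid":"1169254337591906304"}' \
  | jq --compact-output '.buildid |= "975"'
# -> {"job":"ci-kubernetes-aws-actuator","buildid":"975"}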
+ sudo docker run -e 'JOB_SPEC={"type":"presubmit","job":"ci-kubernetes-aws-actuator","buildid":"975","prowjobid":"54633cdd-cf1f-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-api-provider-aws","repo_link":"https://github.com/openshift/cluster-api-provider-aws","base_ref":"master","base_sha":"d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","base_link":"https://github.com/openshift/cluster-api-provider-aws/commit/d4644b21c9dbeeb79215ad42e8d7f932dd3a5f41","pulls":[{"number":258,"author":"ingvagabund","sha":"6fc8f0a260389bc1798d5bc6fea49264a324669f","link":"https://github.com/openshift/cluster-api-provider-aws/pull/258","commit_link":"https://github.com/openshift/cluster-api-provider-aws/pull/258/commits/6fc8f0a260389bc1798d5bc6fea49264a324669f","author_link":"https://github.com/ingvagabund"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json /data/gcs/artifacts /data/gcs/build-log.txt /data/gcs/finished.json
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
a073c86ecf9e: Already exists
cc3fc741b1a9: Already exists
822bed51ba40: Pulling fs layer
85cea451eec0: Pulling fs layer
85cea451eec0: Verifying Checksum
85cea451eec0: Download complete
822bed51ba40: Download complete
822bed51ba40: Pull complete
85cea451eec0: Pull complete
Digest: sha256:03aad50d7ec631ee07c12ac2ba679bd48c7781f7d5754f9e0dcc4e7260e35208
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","file":"prow/gcsupload/run.go:166","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts","level":"info","msg":"Gathering artifacts from artifact directory: /data/gcs/artifacts","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/avc_denials.log in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/avc_denials.log\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/containers.log in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/containers.log\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/dmesg.log in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/dmesg.log\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/docker.config in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.config\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/docker.info in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.info\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/filesystem.info in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/filesystem.info\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/installed_packages.log in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/installed_packages.log\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/generated/pid1.journal in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/pid1.journal\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/journals/dnsmasq.service in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/dnsmasq.service\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/journals/docker.service in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/docker.service\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:184","func":"k8s.io/test-infra/prow/gcsupload.gatherArtifacts.func1","level":"info","msg":"Found /data/gcs/artifacts/journals/systemd-journald.service in artifact directory. Uploading as pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/systemd-journald.service\n","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/containers.log","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/dmesg.log","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.config","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/installed_packages.log","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/finished.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/avc_denials.log","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.info","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/dnsmasq.service","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/systemd-journald.service","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/975.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/filesystem.info","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/pid1.journal","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/docker.service","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/build-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/finished.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/installed_packages.log","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.config","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/systemd-journald.service","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/filesystem.info","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/docker.info","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/975.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/directory/ci-kubernetes-aws-actuator/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/avc_denials.log","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/dnsmasq.service","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/containers.log","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/dmesg.log","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/generated/pid1.journal","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/build-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:51Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/artifacts/journals/docker.service","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T14:55:52Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T14:55:52Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
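From the dest fields in the gcsupload output, everything for this run lands under gs://origin-ci-test/pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/, plus the latest-build.txt pointers under pr-logs/. To browse or fetch the uploaded artifacts afterwards, a sketch using gsutil (assuming the gsutil CLI and read access to the bucket; the job itself only runs the gcsupload container) is:

# List everything this run published; the prefix is taken from the upload log above.
gsutil ls -r gs://origin-ci-test/pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/
# Pull down just the consolidated build log for local inspection.
gsutil cp gs://origin-ci-test/pr-logs/pull/openshift_cluster-api-provider-aws/258/ci-kubernetes-aws-actuator/975/build-log.txt .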
[workspace] $ /bin/bash /tmp/jenkins8404865748238474611.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate ]]
+ source /var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config
+ oct deprovision
PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml
PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****
TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir) => {
"changed": false,
"generated_timestamp": "2019-09-04 10:55:53.351956",
"item": "origin_ci_inventory_dir",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region) => {
"changed": false,
"generated_timestamp": "2019-09-04 10:55:53.356553",
"item": "origin_ci_aws_region",
"skip_reason": "Conditional check failed",
"skipped": true
}
PLAY [deprovision virtual hosts in EC2] ****************************************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost
TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-04 10:55:54.258798",
"msg": ""
}
TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-04 10:55:54.943632",
"msg": "Tags {'Name': 'oct-terminate'} created for resource i-00cad1e0a8c636632."
}
TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true,
    "generated_timestamp": "2019-09-04 10:55:55.844880",
    "instance_ids": [
        "i-00cad1e0a8c636632"
    ],
    "instances": [
        {
            "ami_launch_index": "0",
            "architecture": "x86_64",
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true,
                    "status": "attached",
                    "volume_id": "vol-097be3317dc12d72a"
                },
                "/dev/sdb": {
                    "delete_on_termination": true,
                    "status": "attached",
                    "volume_id": "vol-02974abdb2ed88770"
                }
            },
            "dns_name": "ec2-54-82-177-53.compute-1.amazonaws.com",
            "ebs_optimized": false,
            "groups": {
                "sg-7e73221a": "default"
            },
            "hypervisor": "xen",
            "id": "i-00cad1e0a8c636632",
            "image_id": "ami-0b77b87a37c3e662c",
            "instance_type": "m4.xlarge",
            "kernel": null,
            "key_name": "libra",
            "launch_time": "2019-09-04T14:22:33.000Z",
            "placement": "us-east-1c",
            "private_dns_name": "ip-172-18-18-57.ec2.internal",
            "private_ip": "172.18.18.57",
            "public_dns_name": "ec2-54-82-177-53.compute-1.amazonaws.com",
            "public_ip": "54.82.177.53",
            "ramdisk": null,
            "region": "us-east-1",
            "root_device_name": "/dev/sda1",
            "root_device_type": "ebs",
            "state": "running",
            "state_code": 16,
            "tags": {
                "Name": "oct-terminate",
                "openshift_etcd": "",
                "openshift_master": "",
                "openshift_node": ""
            },
            "tenancy": "default",
            "virtualization_type": "hvm"
        }
    ],
    "tagged_instances": []
}
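The two tasks above first retag instance i-00cad1e0a8c636632 as oct-terminate, so a reaper job can still find it if termination fails, and then terminate it. The same two steps expressed directly against the AWS API with the CLI, as a hedged sketch (assuming credentials for the same account and us-east-1; this is not what oct itself runs):

# Step 1: rename the instance for the termination reaper.
aws ec2 create-tags --region us-east-1 \
    --resources i-00cad1e0a8c636632 \
    --tags Key=Name,Value=oct-terminate
# Step 2: tear the instance down.
aws ec2 terminate-instances --region us-east-1 \
    --instance-ids i-00cad1e0a8c636632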
TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-04 10:55:56.104093",
"path": "/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.18.57.yml",
"state": "absent"
}
PLAY [deprovision virtual hosts locally managed by Vagrant] *********************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
PLAY [clean up local configuration for deprovisioned instances] ****************
TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-04 10:55:56.646193",
"path": "/var/lib/jenkins/jobs/ci-kubernetes-aws-actuator/workspace/.config/origin-ci-tool/inventory",
"state": "absent"
}
PLAY RECAP *********************************************************************
localhost : ok=8 changed=4 unreachable=0 failed=0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
Recording test results
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[WS-CLEANUP] done
Finished: SUCCESS