Started by user OpenShift CI Robot
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[workspace] $ /bin/bash /tmp/jenkins358132401197429903.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ touch /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ cp /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
+ cat
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
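In short, this stage activates a pinned origin-ci-tool (oct) virtualenv and records Ansible/AWS client settings that the later stages consume. A minimal sketch of the same steps, with paths and options copied from the trace above (WORKSPACE standing in for the Jenkins job workspace is an assumption here):

    # Sketch: bootstrap oct the way this stage does (paths from the log).
    source /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate
    export OCT_CONFIG_HOME="${WORKSPACE}/.config"   # per-job oct state; WORKSPACE assumed Jenkins-provided
    oct configure ansible-client verbosity 2
    oct configure aws-client keypair_name libra
    oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem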
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 02s] ##########
[workspace] $ /bin/bash /tmp/jenkins2765500342490802679.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ oct provision remote all-in-one --os rhel --stage base --provider aws --discrete-ssh-config --name pull-ci-openshift-cluster-api-actuator-pkg-master-e2e_548

PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml

PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.785416", "item": "origin_ci_inventory_dir", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_keypair_name) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.789086", "item": "origin_ci_aws_keypair_name", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_private_key_path) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.792219", "item": "origin_ci_aws_private_key_path", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_region) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.795499", "item": "origin_ci_aws_region", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_ami_tags) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.798548", "item": "origin_ci_aws_ami_tags", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_instance_name) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.801588", "item": "origin_ci_aws_instance_name", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_instance_type) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.804606", "item": "origin_ci_aws_master_instance_type", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.808836", "item": "origin_ci_aws_identifying_tag_key", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_hostname) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.811899", "item": "origin_ci_aws_hostname", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_ssh_config_strategy) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.814952", "item": "origin_ci_ssh_config_strategy", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=openshift_schedulable) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.818007", "item": "openshift_schedulable", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=openshift_node_labels) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.821250", "item": "openshift_node_labels", "skip_reason": "Conditional check failed", "skipped": true }

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:27
skipping: [localhost] => (item=origin_ci_aws_master_subnet) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.855256", "item": "origin_ci_aws_master_subnet", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.860106", "item": "origin_ci_aws_etcd_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_node_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.865684", "item": "origin_ci_aws_node_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.869895", "item": "origin_ci_aws_master_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.875320", "item": "origin_ci_aws_master_external_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.879538", "item": "origin_ci_aws_master_internal_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_router_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.884941", "item": "origin_ci_aws_router_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group) => { "changed": false, "generated_timestamp": "2019-09-04 11:54:54.889823", "item": "origin_ci_aws_router_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }
PLAY [provision an AWS EC2 instance] *******************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:54:55.705100", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => { "changed": true, "checksum": "18aaee00994df38cc3a63b635893175235331a9c", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/nested_group_mappings", "generated_timestamp": "2019-09-04 11:54:56.174214", "gid": 995, "group": "jenkins", "md5sum": "b30c3226ea63efa3ff9c5e346c14a16e", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 93, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612495.95-107298876650117/source", "state": "file", "uid": 997 }

TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-09-04 11:54:56.342742", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-09-04 11:54:56.508883", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => { "changed": true, "checksum": "4c06ba508f055c20f13426e8587342e8765a7b66", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml", "generated_timestamp": "2019-09-04 11:54:56.808610", "gid": 995, "group": "jenkins", "md5sum": "8aec71c75f7d512b278ae7c6f2959b12", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 331, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612496.67-25242189034065/source", "state": "file", "uid": 997 }

TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => { "changed": true, "cmd": [ "curl", "-s", "http://instance-data.ec2.internal" ], "delta": "0:00:00.015468", "end": "2019-09-04 11:54:57.037509", "failed": false, "failed_when_result": false, "generated_timestamp": "2019-09-04 11:54:57.054164", "rc": 0, "start": "2019-09-04 11:54:57.022041", "stderr": [], "stdout": [ "1.0", "2007-01-19", "2007-03-01", "2007-08-29", "2007-10-10", "2007-12-15", "2008-02-01", "2008-09-01", "2009-04-04", "2011-01-01", "2011-05-01", "2012-01-12", "2014-02-25", "2014-11-05", "2015-10-20", "2016-04-19", "2016-06-30", "2016-09-02", "2018-03-28", "2018-08-17", "2018-09-24", "latest" ], "warnings": [ "Consider using get_url or uri module rather than running curl" ] }
[WARNING]: Consider using get_url or uri module rather than running curl

TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_destination_variable": "private_dns_name", "origin_ci_aws_host_address_variable": "private_ip", "origin_ci_aws_vpc_destination_variable": "private_ip_address" }, "changed": false, "generated_timestamp": "2019-09-04 11:54:57.093546" }

TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache" }, "changed": false, "generated_timestamp": "2019-09-04 11:54:57.127232" }

TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-09-04 11:54:57.289854", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => { "changed": true, "checksum": "625b8af723189db3b96ba0026d0f997a0025bc47", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.py", "generated_timestamp": "2019-09-04 11:54:57.586410", "gid": 995, "group": "jenkins", "md5sum": "cac06c14065dac74904232b89d4ba24c", "mode": "0755", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 63725, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612497.45-74138137152386/source", "state": "file", "uid": 997 }

TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => { "changed": true, "checksum": "95cfab7cb70fbdbaf93954bddd991863245ba271", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.ini", "generated_timestamp": "2019-09-04 11:54:57.884903", "gid": 995, "group": "jenkins", "md5sum": "3ae11ff48b8302999169180e468666e6", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 429, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612497.62-1652222252294/source", "state": "file", "uid": 997 }

TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => { "changed": true, "checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/tag_to_group_mappings", "generated_timestamp": "2019-09-04 11:54:58.182671", "gid": 995, "group": "jenkins", "md5sum": "bc3a567a1b6f342e1005182efc1b66be", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 287, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612498.04-110372553016443/source", "state": "file", "uid": 997 }

TASK [aws-up : list available AMIs] ********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:40
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:55:01.097178", "results": [ { "ami_id": "ami-04f9b88b6b0571f20", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-0655d2d962c590c8c", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d86ae865b17f4def", "volume_type": "gp2" } }, "creationDate": "2018-06-26T12:22:31.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_758", "name": "ami_build_origin_int_rhel_base_758", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_758", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" }, { "ami_id": "ami-0b77b87a37c3e662c", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-02ec23d4818f2747e", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d8726e441d4ca329", "volume_type": "gp2" } }, "creationDate": "2018-06-26T22:18:53.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_760", "name": "ami_build_origin_int_rhel_base_760", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_760", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } ] }

TASK [aws-up : choose appropriate AMIs for use] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:49
ok: [localhost] => (item={u'ami_id': u'ami-04f9b88b6b0571f20', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_758', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d86ae865b17f4def', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0655d2d962c590c8c', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_758', u'is_public': False, u'creationDate': u'2018-06-26T12:22:31.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_758'}) => { "ansible_facts": { "origin_ci_aws_ami_id_candidate": "ami-04f9b88b6b0571f20" }, "changed": false, "generated_timestamp": "2019-09-04 11:55:01.142836", "item": { "ami_id": "ami-04f9b88b6b0571f20", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-0655d2d962c590c8c", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d86ae865b17f4def", "volume_type": "gp2" } }, "creationDate": "2018-06-26T12:22:31.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_758", "name": "ami_build_origin_int_rhel_base_758", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_758", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } }
ok: [localhost] => (item={u'ami_id': u'ami-0b77b87a37c3e662c', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_760', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d8726e441d4ca329', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-02ec23d4818f2747e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_760', u'is_public': False, u'creationDate': u'2018-06-26T22:18:53.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_760'}) => { "ansible_facts": { "origin_ci_aws_ami_id_candidate": "ami-0b77b87a37c3e662c" }, "changed": false, "generated_timestamp": "2019-09-04 11:55:01.147620", "item": { "ami_id": "ami-0b77b87a37c3e662c", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-02ec23d4818f2747e", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d8726e441d4ca329", "volume_type": "gp2" } }, "creationDate": "2018-06-26T22:18:53.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_760", "name": "ami_build_origin_int_rhel_base_760", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_760", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } }

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:55
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_ami_id": "ami-0b77b87a37c3e662c" }, "changed": false, "generated_timestamp": "2019-09-04 11:55:01.187666" }

TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:60
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:55:02.233592", "subnets": [ { "availability_zone": "us-east-1c", "available_ip_address_count": 4068, "cidr_block": "172.18.16.0/20", "default_for_az": "false", "id": "subnet-8bdb5ac2", "map_public_ip_on_launch": "true", "state": "available", "tags": { "Name": "devenv-subnet-2", "origin_ci_aws_cluster_component": "master_subnet" }, "vpc_id": "vpc-69705d0c" }, { "availability_zone": "us-east-1d", "available_ip_address_count": 3987, "cidr_block": "172.18.0.0/20", "default_for_az": "false", "id": "subnet-cf57c596", "map_public_ip_on_launch": "true", "state": "available", "tags": { "Name": "devenv-subnet-1", "origin_ci_aws_cluster_component": "master_subnet" }, "vpc_id": "vpc-69705d0c" } ] }

TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:67
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_master_subnet_ids": [ "subnet-8bdb5ac2", "subnet-cf57c596" ] }, "changed": false, "generated_timestamp": "2019-09-04 11:55:02.275149" }

TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:72
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:55:03.426289", "security_groups": [ { "description": "default VPC security group", "group_id": "sg-7e73221a", "group_name": "default", "ip_permissions": [ { "ip_protocol": "-1", "ip_ranges": [], "ipv6_ranges": [], "prefix_list_ids": [], "user_id_group_pairs": [ { "group_id": "sg-7e73221a", "user_id": "531415883065" } ] }, { "from_port": 80, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "54.241.19.245/32" }, { "cidr_ip": "97.65.119.184/29" }, { "cidr_ip": "107.20.219.35/32" }, { "cidr_ip": "108.166.48.153/32" }, { "cidr_ip": "212.199.177.64/27" }, { "cidr_ip": "212.72.208.162/32" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 443, "user_id_group_pairs": [] }, { "from_port": 53, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "119.254.120.64/26" }, { "cidr_ip": "209.132.176.0/20" }, { "cidr_ip": "209.132.186.34/32" }, { "cidr_ip": "213.175.37.10/32" }, { "cidr_ip": "62.40.79.66/32" }, { "cidr_ip": "66.187.224.0/20" }, { "cidr_ip": "66.187.239.0/24" }, { "cidr_ip": "38.140.108.0/24" }, { "cidr_ip": "213.175.37.9/32" }, { "cidr_ip": "38.99.12.232/29" }, { "cidr_ip": "4.14.33.72/30" }, { "cidr_ip": "4.14.35.88/29" }, { "cidr_ip": "50.227.40.96/29" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 8444, "user_id_group_pairs": [] }, { "from_port": 22, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 22, "user_id_group_pairs": [] }, { "from_port": 53, "ip_protocol": "udp", "ip_ranges": [ { "cidr_ip": "209.132.176.0/20" }, { "cidr_ip": "66.187.224.0/20" }, { "cidr_ip": "66.187.239.0/24" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 53, "user_id_group_pairs": [] }, { "from_port": 0, "ip_protocol": "udp", "ip_ranges": [], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 65535, "user_id_group_pairs": [ { "group_id": "sg-0d1a5377", "user_id": "531415883065" }, { "group_id": "sg-5875023f", "user_id": "531415883065" }, { "group_id": "sg-7e73221a", "user_id": "531415883065" }, { "group_id": "sg-e1760186", "user_id": "531415883065" } ] }, { "from_port": 3389, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 3389, "user_id_group_pairs": [] }, { "from_port": -1, "ip_protocol": "icmp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": -1, "user_id_group_pairs": [] } ], "ip_permissions_egress": [ { "ip_protocol": "-1", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "user_id_group_pairs": [] } ], "owner_id": "531415883065", "tags": { "Name": "devenv-vpc", "openshift_infra": "true", "origin_ci_aws_cluster_component": "master_security_group" }, "vpc_id": "vpc-69705d0c" } ] }

TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:79
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_master_security_group_ids": [ "sg-7e73221a" ] }, "changed": false, "generated_timestamp": "2019-09-04 11:55:03.493115" }

TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:84
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-09-04 11:55:21.032961", "instance_ids": [ "i-08faf914cd4493369" ], "instances": [ { "ami_launch_index": "0", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-00985a53f235b4484" }, "/dev/sdb": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-02c3588bcfd4d1e4f" } }, "dns_name": "ec2-3-95-66-137.compute-1.amazonaws.com", "ebs_optimized": false, "groups": { "sg-7e73221a": "default" }, "hypervisor": "xen", "id": "i-08faf914cd4493369", "image_id": "ami-0b77b87a37c3e662c", "instance_type": "m4.xlarge", "kernel": null, "key_name": "libra", "launch_time": "2019-09-04T15:55:05.000Z", "placement": "us-east-1c", "private_dns_name": "ip-172-18-25-165.ec2.internal", "private_ip": "172.18.25.165", "public_dns_name": "ec2-3-95-66-137.compute-1.amazonaws.com", "public_ip": "3.95.66.137", "ramdisk": null, "region": "us-east-1", "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "running", "state_code": 16, "tags": { "Name": "pull-ci-openshift-cluster-api-actuator-pkg-master-e2e_548", "openshift_etcd": "", "openshift_master": "", "openshift_node": "" }, "tenancy": "default", "virtualization_type": "hvm" } ], "tagged_instances": [] }

TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:110
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_host": "172.18.25.165" }, "changed": false, "generated_timestamp": "2019-09-04 11:55:21.072263" }

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:114
skipping: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:55:21.107349", "skip_reason": "Conditional check failed", "skipped": true }

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:119
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_ssh_user": "origin" }, "changed": false, "generated_timestamp": "2019-09-04 11:55:21.146547" }

TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:124
changed: [localhost] => { "changed": true, "checksum": "2af42033776339cb280f7ecf9a4f542278a2bb71", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.25.165.yml", "generated_timestamp": "2019-09-04 11:55:21.460906", "gid": 995, "group": "jenkins", "md5sum": "9fa1db406cfd8d0111c6e9d83c7c9e43", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 773, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567612521.32-231618455906355/source", "state": "file", "uid": 997 }

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:141
ok: [localhost] => { "ansible_facts": { "origin_ci_ssh_config_files": [ "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config" ] }, "changed": false, "generated_timestamp": "2019-09-04 11:55:21.499208" }

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:146
skipping: [localhost] => { "changed": false, "generated_timestamp": "2019-09-04 11:55:21.529891", "skip_reason": "Conditional check failed", "skipped": true }

TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:151
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => { "changed": true, "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "generated_timestamp": "2019-09-04 11:55:21.696355", "gid": 995, "group": "jenkins", "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 0, "state": "file", "uid": 997 }

TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:157
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => { "changed": true, "generated_timestamp": "2019-09-04 11:55:21.951548", "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "msg": "Block inserted" }

TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:175
ok: [localhost] => { "changed": false, "elapsed": 75, "generated_timestamp": "2019-09-04 11:56:37.292811", "path": null, "port": 22, "search_regex": null, "state": "started" }

PLAY RECAP *********************************************************************
localhost : ok=28 changed=13 unreachable=0 failed=0

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 01m 43s] ##########
[workspace] $ /bin/bash /tmp/jenkins2276690373434330794.sh
########## STARTING STAGE: FORWARD GCS CREDENTIALS TO REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ (( i = 0 ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ break
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD GCS CREDENTIALS TO REMOTE HOST [00h 00m 02s] ##########
[workspace] $ /bin/bash /tmp/jenkins2839885611859024988.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"1169277617493250048","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''buildId='\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_ID=1169277617493250048'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_OWNER=openshift'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_NAME=cluster-api-actuator-pkg'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_REF=master'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_SHA=b3dad42ded9cf0288809ca2cef3311c06339e749'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_REFS=master:b3dad42ded9cf0288809ca2cef3311c06339e749,114:801a89559c2b745e456c44495e502136bfd3391b'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_NUMBER=114'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_PULL_SHA=801a89559c2b745e456c44495e502136bfd3391b'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"1169277617493250048","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=548'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''CLONEREFS_ARGS='\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 04s] ##########
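This stage serializes the Prow job parameters as KEY=VALUE lines appended to /etc/environment on the remote host, so login shells there (the later stages run bash -l) see them. A sketch of pulling one value back out on the remote side, assuming jq is available; note /etc/environment is not shell syntax in general, so it is safer to read it as text than to source it:

    # Sketch: recover a forwarded value on the remote host.
    # JOB_SPEC is appended twice above, so take the last occurrence.
    JOB_SPEC="$(grep '^JOB_SPEC=' /etc/environment | tail -1 | cut -d= -f2-)"
    echo "${JOB_SPEC}" | jq -r '.refs.pulls[0].number'   # -> 114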
[workspace] $ /bin/bash /tmp/jenkins2045763661518429886.sh
########## STARTING STAGE: SYNC REPOSITORIES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.YU0JsrTq2w
+ cat
+ chmod +x /tmp/tmp.YU0JsrTq2w
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.YU0JsrTq2w openshiftdevel:/tmp/tmp.YU0JsrTq2w
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.YU0JsrTq2w"'
+ cd /home/origin
++ jq --compact-output '.buildid |= "548"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"548","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}}'
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ...
latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs
1160f4abea84: Pulling fs layer
be60dbe7622d: Pulling fs layer
d26b76701841: Pulling fs layer
1b90cab916ea: Pulling fs layer
3a00cbb24bdb: Pulling fs layer
1b90cab916ea: Waiting
3a00cbb24bdb: Waiting
1160f4abea84: Verifying Checksum
1160f4abea84: Download complete
be60dbe7622d: Verifying Checksum
be60dbe7622d: Download complete
3a00cbb24bdb: Verifying Checksum
3a00cbb24bdb: Download complete
d26b76701841: Verifying Checksum
d26b76701841: Download complete
1b90cab916ea: Verifying Checksum
1b90cab916ea: Download complete
1160f4abea84: Pull complete
be60dbe7622d: Pull complete
d26b76701841: Pull complete
1b90cab916ea: Pull complete
3a00cbb24bdb: Pull complete
Digest: sha256:d68e1c6c2de5c1167a79b24d5ba4f909349ca7a44fb634e214bdadc2c8b010cd
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest
+ break
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
8f72556ef119: Pulling fs layer
8e5b170ec95b: Pulling fs layer
8e5b170ec95b: Waiting
a073c86ecf9e: Verifying Checksum
a073c86ecf9e: Download complete
cc3fc741b1a9: Verifying Checksum
cc3fc741b1a9: Download complete
a073c86ecf9e: Pull complete
8f72556ef119: Verifying Checksum
8f72556ef119: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
cc3fc741b1a9: Pull complete
8f72556ef119: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:e651a6455ada7c070c439eddcd753e2e2ac1fb934c4f2a526c37a4674c8eaee4
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
+ clonerefs_args='--repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master '
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,cluster-api-actuator-pkg=master:b3dad42ded9cf0288809ca2cef3311c06339e749,114:801a89559c2b745e456c44495e502136bfd3391b --repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"","sha":"801a89559c2b745e456c44495e502136bfd3391b"}]},"time":"2019-09-04T15:57:46Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","base_ref":"master"},"time":"2019-09-04T15:57:46Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"kubernetes-autoscaler","base_ref":"master"},"time":"2019-09-04T15:57:46Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"machine-api-operator","base_ref":"master"},"time":"2019-09-04T15:57:46Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","base_ref":"master"},"time":"2019-09-04T15:57:46Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-kubemark/.git/\n","time":"2019-09-04T15:57:46Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-actuator-pkg","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"mkdir -p /data/src/github.com/openshift/kubernetes-autoscaler","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"mkdir -p /data/src/github.com/openshift/machine-api-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-autoscaler-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-actuator-pkg/.git/\n","time":"2019-09-04T15:57:46Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/kubernetes-autoscaler/.git/\n","time":"2019-09-04T15:57:46Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/machine-api-operator/.git/\n","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-autoscaler-operator/.git/\n","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:46Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.1.0 -\u003e v0.1.0\n * [new tag] v0.2.0 -\u003e v0.2.0\n","time":"2019-09-04T15:57:48Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:48Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.0.0 -\u003e v0.0.0\n","time":"2019-09-04T15:57:49Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 474e14e... Merge pull request #391 from mgugino-upstream-stage/related-resources\n","time":"2019-09-04T15:57:49Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:49Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:50Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T15:57:50Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 045aea45... Merge pull request #117 from enxebre/more-related-objects\n","time":"2019-09-04T15:57:51Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:51Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:51Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v1.0 -\u003e v1.0\n","time":"2019-09-04T15:57:51Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:51Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T15:57:51Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:51Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-actuator-pkg.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-actuator-pkg\n * branch HEAD -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:52Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 45659b31...
Merge pull request #27 from frobware/bump-openshift-cluster-api-deps\n","time":"2019-09-04T15:57:52Z"} {"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:52Z"} {"command":"git fetch https://github.com/openshift/cluster-api-actuator-pkg.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-actuator-pkg\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:53Z"} {"command":"git checkout b3dad42ded9cf0288809ca2cef3311c06339e749","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'b3dad42ded9cf0288809ca2cef3311c06339e749'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at b3dad42d... Merge pull request #113 from frobware/retry-on-error\n","time":"2019-09-04T15:57:53Z"} {"command":"git branch --force master b3dad42ded9cf0288809ca2cef3311c06339e749","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:53Z"} {"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T15:57:53Z"} {"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:54Z"} {"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T15:57:54Z"} {"command":"git fetch https://github.com/openshift/cluster-api-actuator-pkg.git pull/114/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-actuator-pkg\n * branch refs/pull/114/head -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:54Z"} {"command":"git merge --no-ff 801a89559c2b745e456c44495e502136bfd3391b","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Merge made by the 'recursive' strategy.\n pkg/e2e/autoscaler/autoscaler.go | 72 +++++++++++++++++++++++++++++-----------\n 1 file changed, 53 
insertions(+), 19 deletions(-)\n","time":"2019-09-04T15:57:54Z"} {"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:54Z"} {"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] addon-resizer-1.8.0 -\u003e addon-resizer-1.8.0\n * [new tag] addon-resizer-1.8.1 -\u003e addon-resizer-1.8.1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.53.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.53.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.54.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.54.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.61.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.61.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.64.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.64.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.10.0-0.65.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.65.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.67.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.67.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.68.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.68.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-1666 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-1666\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.1-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.1-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.10-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.10-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.2-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.2-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.11.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.11.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.15.0 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.16.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.16.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.20.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.100-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.100-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.104-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.104-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.105-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.105-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.106-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.106-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.107-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.107-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.108-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.108-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.109-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.109-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.110-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.110-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.111-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.111-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.11.112-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.112-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.113-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.113-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.114-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.114-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.115-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.115-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.116-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.116-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.117-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.117-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.119-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.119-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.121-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.121-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.122-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.122-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.123-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.123-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.124-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.124-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.125-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.125-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.126-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.126-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.127-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.127-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.128-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.128-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.129-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.129-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.130-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.130-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.131-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.131-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.132-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.132-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.133-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.133-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.134-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.134-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.135-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.135-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.136-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.136-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.137-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.137-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.138-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.138-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.139-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.139-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.140-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.140-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.141-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.141-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.142-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.142-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.19-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.19-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.20-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.20-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.24-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.24-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.25-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.25-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.26-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.26-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.27-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.27-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.28-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.28-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.29-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.29-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.30-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.30-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.31-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.31-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.32-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.32-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.33-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.33-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.34-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.34-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.35-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.35-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.36-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.36-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.37-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.37-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.38-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.38-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.39-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.39-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.4-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.4-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.40-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.40-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.41-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.41-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.42-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.42-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.43-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.43-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.44-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.44-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.45-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.45-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.46-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.46-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.47-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.47-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.49-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.49-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.50-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.50-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.51-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.51-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.53-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.53-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.54-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.54-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.55-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.55-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.56-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.56-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.57-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.57-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.58-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.58-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.59-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.59-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.60-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.60-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.61-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.61-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.62-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.62-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.63-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.63-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.64-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.64-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.65-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.65-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.66-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.66-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.67-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.67-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.69-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.69-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.71-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.71-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.72-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.72-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.73-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.73-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.74-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.74-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.75-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.75-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.76-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.76-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.77-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.77-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.78-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.78-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.79-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.79-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.81-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.81-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.82-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.82-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.83-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.83-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.85-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.85-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.86-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.86-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.87-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.87-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.88-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.88-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.90-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.90-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.91-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.91-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.92-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.92-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.93-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.93-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.94-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.94-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.95-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.95-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.96-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.96-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.97-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.97-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.98-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.98-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.99-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.99-1\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.100.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.100.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.101.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.101.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.102.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.102.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.103.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.103.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.104.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.104.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.105.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.105.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.106.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.106.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.107.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.107.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.109.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.109.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.110.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.110.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.112.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.112.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.114.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.114.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.115.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.115.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.116.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.116.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.117.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.117.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.118.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.118.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.119.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.119.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.12.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.12.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.122.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.122.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.123.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.123.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.124.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.124.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.125.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.125.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.128.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.128.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.130.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.130.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.131.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.131.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.132.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.132.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.136.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.136.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.137.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.137.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.138.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.138.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.139.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.139.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.140.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.140.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.141.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.141.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.142.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.142.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.143.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.143.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.144.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.144.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.145.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.145.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.146.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.146.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.147.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.147.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.148.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.148.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.149.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.149.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.20.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.29.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.29.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.31.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.31.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.33.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.33.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.36.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.36.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.39.0 -\u003e 
atomic-openshift-cluster-autoscaler-4.0.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.4.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.4.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.45.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.45.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.48.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.48.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.49.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.49.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.55.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.55.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.59.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.59.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.6.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.6.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.62.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.62.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.70.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.70.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.72.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.72.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.74.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.74.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.75.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.75.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.76.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.76.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.77.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.77.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.79.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.79.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.80.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.80.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.81.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.81.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.82.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.82.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.83.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.83.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.84.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.84.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.85.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.85.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.87.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.87.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.88.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.88.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.89.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.89.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.91.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.91.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.92.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.92.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.93.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.93.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.94.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.94.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.95.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.95.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.96.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.96.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.97.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.97.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.98.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.98.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.99.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.99.0\n * [new tag] cluster-autoscaler-0.5.2 -\u003e cluster-autoscaler-0.5.2\n * [new tag] cluster-autoscaler-0.5.3 -\u003e cluster-autoscaler-0.5.3\n * [new tag] cluster-autoscaler-0.5.4 -\u003e cluster-autoscaler-0.5.4\n * [new tag] cluster-autoscaler-0.6.0 -\u003e cluster-autoscaler-0.6.0\n * [new tag] cluster-autoscaler-0.6.1 -\u003e cluster-autoscaler-0.6.1\n * [new tag] cluster-autoscaler-0.6.2 -\u003e cluster-autoscaler-0.6.2\n * [new tag] cluster-autoscaler-0.6.3 -\u003e cluster-autoscaler-0.6.3\n * [new tag] cluster-autoscaler-0.6.4 -\u003e 
cluster-autoscaler-0.6.4\n * [new tag] cluster-autoscaler-1.0.0 -\u003e cluster-autoscaler-1.0.0\n * [new tag] cluster-autoscaler-1.0.1 -\u003e cluster-autoscaler-1.0.1\n * [new tag] cluster-autoscaler-1.0.2 -\u003e cluster-autoscaler-1.0.2\n * [new tag] cluster-autoscaler-1.0.3 -\u003e cluster-autoscaler-1.0.3\n * [new tag] cluster-autoscaler-1.0.4 -\u003e cluster-autoscaler-1.0.4\n * [new tag] cluster-autoscaler-1.0.5 -\u003e cluster-autoscaler-1.0.5\n * [new tag] cluster-autoscaler-1.1.0 -\u003e cluster-autoscaler-1.1.0\n * [new tag] cluster-autoscaler-1.1.1 -\u003e cluster-autoscaler-1.1.1\n * [new tag] cluster-autoscaler-1.1.2 -\u003e cluster-autoscaler-1.1.2\n * [new tag] cluster-autoscaler-1.2.0 -\u003e cluster-autoscaler-1.2.0\n * [new tag] cluster-autoscaler-1.2.1 -\u003e cluster-autoscaler-1.2.1\n * [new tag] cluster-autoscaler-1.2.2 -\u003e cluster-autoscaler-1.2.2\n * [new tag] v3.10.0 -\u003e v3.10.0\n * [new tag] v3.10.0-alpha.0 -\u003e v3.10.0-alpha.0\n * [new tag] v3.10.0-rc.0 -\u003e v3.10.0-rc.0\n * [new tag] v3.11 -\u003e v3.11\n * [new tag] v3.11.0 -\u003e v3.11.0\n * [new tag] v3.11.0-alpha.0 -\u003e v3.11.0-alpha.0\n * [new tag] vertical-pod-autoscaler-0.1 -\u003e vertical-pod-autoscaler-0.1\n","time":"2019-09-04T15:57:56Z"} {"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-04T15:57:56Z"} {"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 18a08df11... 
Merge pull request #114 from ingvagabund/goimports-makefile\n","time":"2019-09-04T15:57:58Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:58Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T15:57:59Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T15:57:59Z"}
{"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-09-04T15:57:59Z"}
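For readers replaying this run by hand: the ref resolution that clonerefs records above for the repository under test reduces to roughly the following sketch (URLs, SHAs, and the /data/src GOPATH layout are taken verbatim from the log; this is a condensation of the logged commands, not the exact clonerefs implementation):

  # Clone the PR under test the way clonerefs did: base_sha of master,
  # then merge the PR head with --no-ff so the test tree matches a merge commit.
  mkdir -p /data/src/github.com/openshift/cluster-api-actuator-pkg
  cd /data/src/github.com/openshift/cluster-api-actuator-pkg
  git init
  git config user.name ci-robot
  git config user.email ci-robot@k8s.io
  git fetch https://github.com/openshift/cluster-api-actuator-pkg.git --tags --prune
  git checkout b3dad42ded9cf0288809ca2cef3311c06339e749        # base_sha from JOB_SPEC
  git branch --force master b3dad42ded9cf0288809ca2cef3311c06339e749
  git checkout master
  git fetch https://github.com/openshift/cluster-api-actuator-pkg.git pull/114/head
  git merge --no-ff 801a89559c2b745e456c44495e502136bfd3391b   # head of PR #114
  git submodule update --init --recursive

The same init/fetch/checkout/branch pattern repeats above for each extra_refs repository (machine-api-operator, cluster-autoscaler-operator, cluster-api-provider-kubemark, kubernetes-autoscaler), just without the PR merge step.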
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T15:58:02Z"} {"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T15:58:02Z"} + sudo chmod -R a+rwX /data + sudo chown -R origin:origin-git /data + set +o xtrace ########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 22s] ########## [workspace] $ /bin/bash /tmp/jenkins120665663809988947.sh ########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export 
PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=pull-ci-openshift-cluster-api-actuator-pkg-master-e2e'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=548'\'' >> /etc/environment' + set +o xtrace ########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ########## [workspace] $ /bin/bash /tmp/jenkins1506870550703915256.sh ########## STARTING STAGE: UPLOAD THE DEFAULT AWS CREDENTIALS ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'mkdir -p ~/.aws' + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.aws/credentials 'openshiftdevel:~/.aws' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'mkdir -p ~/.ssh' + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.ssh/devenv.pem 'openshiftdevel:~/.ssh/devenv.pem' + set +o xtrace ########## FINISHED STAGE: SUCCESS: UPLOAD THE DEFAULT AWS CREDENTIALS [00h 00m 02s] ########## [workspace] $ /bin/bash /tmp/jenkins1913780826052124398.sh ########## STARTING STAGE: INSTALL MINIKUBE ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++
VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.sJEqVLnRHP + cat + chmod +x /tmp/tmp.sJEqVLnRHP + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.sJEqVLnRHP openshiftdevel:/tmp/tmp.sJEqVLnRHP + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.sJEqVLnRHP"' + cd /home/origin + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 9 40.3M 9 4112k 0 0 4106k 0 0:00:10 0:00:01 0:00:09 4112k 100 40.3M 100 40.3M 0 0 29.1M 0 0:00:01 0:00:01 --:--:-- 29.1M + chmod +x minikube + sudo mv minikube /usr/bin/ + curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 23 51.7M 23 12.0M 0 0 22.6M 0 0:00:02 --:--:-- 0:00:02 22.6M 100 51.7M 100 51.7M 0 0 55.9M 0 --:--:-- --:--:-- --:--:-- 55.9M + chmod +x kubectl + sudo mv kubectl /usr/bin/ + sudo yum install -y ebtables Loaded plugins: amazon-id, rhui-lb, search-disabled-repos Resolving Dependencies --> Running transaction check ---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed --> Finished Dependency Resolution Dependencies Resolved ================================================================================ Package Arch Version Repository Size ================================================================================ Installing: ebtables x86_64 2.0.10-16.el7 oso-rhui-rhel-server-releases 123 k Transaction Summary ================================================================================ Install 1 Package Total download size: 123 k Installed size: 343 k Downloading packages: Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : ebtables-2.0.10-16.el7.x86_64 1/1 Verifying : ebtables-2.0.10-16.el7.x86_64 1/1 Installed: ebtables.x86_64 0:2.0.10-16.el7 Complete! + VERSION=v1.13.0 + wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz --2019-09-04 15:59:35-- https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz Resolving github.com (github.com)... 192.30.253.112 Connecting to github.com (github.com)|192.30.253.112|:443... connected. HTTP request sent, awaiting response... 
302 Found Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&... [following] --2019-09-04 15:59:35-- https://github-production-release-asset-2e65be.s3.amazonaws.com/... (same signed URL) Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 54.231.50.67 Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|54.231.50.67|:443... connected. HTTP request sent, awaiting response... 200 OK Length: 10631149 (10M) [application/octet-stream] Saving to: ‘crictl-v1.13.0-linux-amd64.tar.gz’ [wget progress meter elided]
100% 189M=0.1s 2019-09-04 15:59:36 (82.5 MB/s) - ‘crictl-v1.13.0-linux-amd64.tar.gz’ saved [10631149/10631149] + sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin crictl + rm -f crictl-v1.13.0-linux-amd64.tar.gz + set +o xtrace ########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 01m 28s] ########## [workspace] $ /bin/bash /tmp/jenkins4457867079263457072.sh ########## STARTING STAGE: DEPLOY KUBERNETES ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.tIBDimlcBC + cat + chmod +x /tmp/tmp.tIBDimlcBC + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.tIBDimlcBC openshiftdevel:/tmp/tmp.tIBDimlcBC + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.tIBDimlcBC"' + cd /home/origin + sudo setenforce 0 + sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5 There is a newer version of minikube available (v1.3.1). Download it here: https://github.com/kubernetes/minikube/releases/tag/v1.3.1 To disable this notification, run the following: minikube config set WantUpdateNotification false Starting local Kubernetes v1.12.0 cluster... Starting VM... Creating CA: /root/.minikube/certs/ca.pem Creating client certificate: /root/.minikube/certs/cert.pem Getting VM IP address... Moving files into cluster... Downloading kubeadm v1.12.0 Downloading kubelet v1.12.0 Finished Downloading kubeadm v1.12.0 Finished Downloading kubelet v1.12.0 Setting up certs... Connecting to cluster... Setting up kubeconfig... Starting cluster components... Kubectl is now configured to use the cluster. =================== WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory. You will need to move the files to the appropriate location and then set the correct permissions. 
An example of this is below: sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration sudo chown -R $USER $HOME/.kube sudo chgrp -R $USER $HOME/.kube sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration sudo chown -R $USER $HOME/.minikube sudo chgrp -R $USER $HOME/.minikube This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true Loading cached images from config file. + sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt + set +o xtrace ########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 01m 05s] ########## [workspace] $ /bin/bash /tmp/jenkins83986314587075253.sh ########## STARTING STAGE: INSTALL KUSTOMIZE ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.0DRRojFyB3 + cat + chmod +x /tmp/tmp.0DRRojFyB3 + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.0DRRojFyB3 openshiftdevel:/tmp/tmp.0DRRojFyB3 + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.0DRRojFyB3"' + cd /home/origin + curl -Lo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.1.0/kustomize_2.1.0_linux_amd64 % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 618 0 618 0 0 2876 0 --:--:-- --:--:-- --:--:-- 2874 100 22.9M 100 22.9M 0 0 47.1M 0 --:--:-- --:--:-- --:--:-- 47.1M + chmod u+x kustomize + sudo mv kustomize /usr/bin/kustomize + set +o xtrace ########## FINISHED STAGE: SUCCESS: INSTALL KUSTOMIZE [00h 00m 01s] ########## [workspace] $ /bin/bash /tmp/jenkins3587320624113665025.sh ########## STARTING STAGE: INSTALL IMAGEBUILDER ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export 
OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.xNMwAn5luU + cat + chmod +x /tmp/tmp.xNMwAn5luU + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.xNMwAn5luU openshiftdevel:/tmp/tmp.xNMwAn5luU + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.xNMwAn5luU"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + go get -u github.com/openshift/imagebuilder/cmd/imagebuilder + sudo mv /data/bin/imagebuilder /usr/bin + set +o xtrace ########## FINISHED STAGE: SUCCESS: INSTALL IMAGEBUILDER [00h 00m 25s] ########## [workspace] $ /bin/bash /tmp/jenkins7524708115853677827.sh ########## STARTING STAGE: BUILD KUBEMARK MACHINE CONTROLLERS ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.tcbrcx43wy + cat + chmod +x /tmp/tmp.tcbrcx43wy + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.tcbrcx43wy openshiftdevel:/tmp/tmp.tcbrcx43wy + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.tcbrcx43wy"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + cd /data/src/github.com/openshift/cluster-api-provider-kubemark + sudo make images IMAGE=docker.io/gofed/kubemark-machine-controllers VERSION=v1.0 NO_DOCKER=1 imagebuilder -t "docker.io/gofed/kubemark-machine-controllers:v1.0" -t "docker.io/gofed/kubemark-machine-controllers:latest" ./ --> Image registry.svc.ci.openshift.org/openshift/release:golang-1.10 was not found, pulling ... --> Pulled 0/2 layers, 19% complete --> Pulled 1/2 layers, 73% complete --> Pulled 2/2 layers, 100% complete --> Extracting --> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder --> WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark --> COPY . . --> RUN go build -o ./machine-controller-manager ./cmd/manager --> RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager --> Image docker.io/gofed/base:baseci was not found, pulling ... 
--> Pulled 1/2 layers, 81% complete --> Pulled 2/2 layers, 100% complete --> Extracting --> FROM docker.io/gofed/base:baseci as 1 --> RUN INSTALL_PKGS=" openssh " && yum install -y $INSTALL_PKGS && rpm -V $INSTALL_PKGS && yum clean all && curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /bin/kubectl && curl -LO https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x ./jq-linux64 && mv ./jq-linux64 /bin/jq Loaded plugins: fastestmirror, ovl Determining fastest mirrors * base: repos-va.psychz.net * extras: repos-va.psychz.net * updates: repos-va.psychz.net Resolving Dependencies --> Running transaction check ---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed --> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64 --> Running transaction check ---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed --> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64 --> Running transaction check ---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed --> Finished Dependency Resolution Dependencies Resolved ================================================================================ Package Arch Version Repository Size ================================================================================ Installing: openssh x86_64 7.4p1-16.el7 base 510 k Installing for dependencies: fipscheck x86_64 1.4.1-6.el7 base 21 k fipscheck-lib x86_64 1.4.1-6.el7 base 11 k Transaction Summary ================================================================================ Install 1 Package (+2 Dependent packages) Total download size: 542 k Installed size: 2.0 M Downloading packages: -------------------------------------------------------------------------------- Total 4.5 MB/s | 542 kB 00:00 Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : fipscheck-1.4.1-6.el7.x86_64 1/3 Installing : fipscheck-lib-1.4.1-6.el7.x86_64 2/3 Installing : openssh-7.4p1-16.el7.x86_64 3/3 Verifying : fipscheck-lib-1.4.1-6.el7.x86_64 1/3 Verifying : fipscheck-1.4.1-6.el7.x86_64 2/3 Verifying : openssh-7.4p1-16.el7.x86_64 3/3 Installed: openssh.x86_64 0:7.4p1-16.el7 Dependency Installed: fipscheck.x86_64 0:1.4.1-6.el7 fipscheck-lib.x86_64 0:1.4.1-6.el7 Complete! Loaded plugins: fastestmirror, ovl Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates Cleaning up list of fastest mirrors % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 0 40.9M 0 638 0 0 3562 0 3:21:07 --:--:-- 3:21:07 3564 100 40.9M 100 40.9M 0 0 68.0M 0 --:--:-- --:--:-- --:--:-- 67.9M % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 100 599 0 599 0 0 2719 0 --:--:-- --:--:-- --:--:-- 2722 100 2956k 100 2956k 0 0 10.2M 0 --:--:-- --:--:-- --:--:-- 10.2M --> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager / --> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager / --> Committing changes to docker.io/gofed/kubemark-machine-controllers:v1.0 ... 
--> Tagged as docker.io/gofed/kubemark-machine-controllers:latest --> Done + set +o xtrace ########## FINISHED STAGE: SUCCESS: BUILD KUBEMARK MACHINE CONTROLLERS [00h 01m 36s] ########## [workspace] $ /bin/bash /tmp/jenkins8234080803859191338.sh ########## STARTING STAGE: BUILD CLUSTER AUTOSCALER ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.obEVATHWj8 + cat + chmod +x /tmp/tmp.obEVATHWj8 + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.obEVATHWj8 openshiftdevel:/tmp/tmp.obEVATHWj8 + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.obEVATHWj8"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + cd /data/src/github.com/openshift/kubernetes-autoscaler + sudo imagebuilder -f images/cluster-autoscaler/Dockerfile -t docker.io/openshift/origin-cluster-autoscaler:v4.0 . --> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ... --> Pulled 1/2 layers, 65% complete --> Pulled 2/2 layers, 100% complete --> Extracting --> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder --> WORKDIR /go/src/k8s.io/autoscaler --> COPY . . --> RUN go build -o cluster-autoscaler/cluster-autoscaler ./cluster-autoscaler --> Image registry.svc.ci.openshift.org/openshift/origin-v4.0:base was not found, pulling ... --> Pulled 3/4 layers, 75% complete --> Pulled 4/4 layers, 100% complete --> Extracting --> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1 --> COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /usr/bin/ --> CMD /usr/bin/cluster-autoscaler --> LABEL summary="Cluster Autoscaler for OpenShift and Kubernetes" --> Committing changes to docker.io/openshift/origin-cluster-autoscaler:v4.0 ... 
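These image builds all follow the same multi-stage pattern that the imagebuilder output traces: a registry.svc.ci.openshift.org golang builder stage compiles the controller binaries, then a second stage copies only those binaries onto the origin base image, so the published image carries no Go toolchain or source tree. Rebuilding one of them by hand repeats the invocation the stage itself ran (a sketch using only the paths and tags shown in the log; the Dockerfile declares both stages):

# Rebuild the cluster-autoscaler image the same way the stage above does.
cd /data/src/github.com/openshift/kubernetes-autoscaler
sudo imagebuilder -f images/cluster-autoscaler/Dockerfile \
  -t docker.io/openshift/origin-cluster-autoscaler:v4.0 .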
--> Done + set +o xtrace ########## FINISHED STAGE: SUCCESS: BUILD CLUSTER AUTOSCALER [00h 02m 23s] ########## [workspace] $ /bin/bash /tmp/jenkins2611811646936388176.sh ########## STARTING STAGE: DEPLOY MACHINE API OPERATOR ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.ARCn0qYwTi + cat + chmod +x /tmp/tmp.ARCn0qYwTi + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.ARCn0qYwTi openshiftdevel:/tmp/tmp.ARCn0qYwTi + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.ARCn0qYwTi"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + cd /data/src/github.com/openshift/machine-api-operator + sudo imagebuilder -t docker.io/openshift/origin-machine-api-operator:v4.0.0 . --> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder --> WORKDIR /go/src/github.com/openshift/machine-api-operator --> COPY . . --> RUN NO_DOCKER=1 make build ./hack/go-build.sh machine-api-operator Using version from git... Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-524-g474e14e4) ./hack/go-build.sh nodelink-controller Using version from git... Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-524-g474e14e4) ./hack/go-build.sh machine-healthcheck Using version from git... Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-524-g474e14e4) --> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1 --> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests --> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator . --> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller . --> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-healthcheck . --> LABEL io.openshift.release.operator true --> Committing changes to docker.io/openshift/origin-machine-api-operator:v4.0.0 ... 
--> Done + sudo make deploy-kubemark kustomize build config | kubectl apply -f - namespace "kubemark-actuator" created serviceaccount "kubemark" created clusterrole.rbac.authorization.k8s.io "kubemark-actuator-role" created clusterrolebinding.rbac.authorization.k8s.io "kubemark-actuator-rolebinding" created configmap "deleteunreadynodes" created deployment.apps "machineapi-kubemark-controllers" created kustomize build | kubectl apply -f - namespace "openshift-machine-api" created customresourcedefinition.apiextensions.k8s.io "clusteroperators.config.openshift.io" created customresourcedefinition.apiextensions.k8s.io "featuregates.config.openshift.io" created customresourcedefinition.apiextensions.k8s.io "machinedisruptionbudgets.healthchecking.openshift.io" created customresourcedefinition.apiextensions.k8s.io "machinehealthchecks.healthchecking.openshift.io" created customresourcedefinition.apiextensions.k8s.io "machines.machine.openshift.io" created customresourcedefinition.apiextensions.k8s.io "machinesets.machine.openshift.io" created customresourcedefinition.apiextensions.k8s.io "prometheusrules.monitoring.coreos.com" created customresourcedefinition.apiextensions.k8s.io "servicemonitors.monitoring.coreos.com" created serviceaccount "machine-api-controllers" created serviceaccount "machine-api-operator" created role.rbac.authorization.k8s.io "machine-api-controllers" created role.rbac.authorization.k8s.io "machine-api-operator" created role.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created clusterrole.rbac.authorization.k8s.io "machine-api-controllers" created clusterrole.rbac.authorization.k8s.io "machine-api-operator" created rolebinding.rbac.authorization.k8s.io "machine-api-controllers" created rolebinding.rbac.authorization.k8s.io "machine-api-operator" created rolebinding.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created clusterrolebinding.rbac.authorization.k8s.io "machine-api-controllers" created clusterrolebinding.rbac.authorization.k8s.io "machine-api-operator" created configmap "machine-api-operator-images" created service "machine-api-operator" created deployment.apps "machine-api-operator" created clusteroperator.config.openshift.io "machine-api" created kubectl apply -f config/kubemark-config-infra.yaml customresourcedefinition.apiextensions.k8s.io "infrastructures.config.openshift.io" created infrastructure.config.openshift.io "cluster" created + set +o xtrace ########## FINISHED STAGE: SUCCESS: DEPLOY MACHINE API OPERATOR [00h 01m 15s] ########## [workspace] $ /bin/bash /tmp/jenkins3485555691402954088.sh ########## STARTING STAGE: DEPLOY CLUSTER AUTOSCALER OPERATOR ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ 
OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.meaaBHWlBE + cat + chmod +x /tmp/tmp.meaaBHWlBE + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.meaaBHWlBE openshiftdevel:/tmp/tmp.meaaBHWlBE + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.meaaBHWlBE"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + cd /data/src/github.com/openshift/cluster-autoscaler-operator/ + sudo imagebuilder -t quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 . --> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder --> WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator --> COPY . . --> ENV NO_DOCKER=1 --> ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator --> RUN unset VERSION && make build go build -ldflags "-X github.com/openshift/cluster-autoscaler-operator/pkg/version.Raw=v0.0.0-213-g045aea4" -o "/go/bin/cluster-autoscaler-operator" "github.com/openshift/cluster-autoscaler-operator/cmd/manager" --> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1 --> COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/ --> COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests --> CMD ["/usr/bin/cluster-autoscaler-operator"] --> LABEL io.openshift.release.operator true --> Committing changes to quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 ... --> Done + kustomize build + sudo kubectl apply -f - customresourcedefinition.apiextensions.k8s.io "clusterautoscalers.autoscaling.openshift.io" created customresourcedefinition.apiextensions.k8s.io "machineautoscalers.autoscaling.openshift.io" created serviceaccount "cluster-autoscaler" created serviceaccount "cluster-autoscaler-operator" created role.rbac.authorization.k8s.io "cluster-autoscaler" created role.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created role.rbac.authorization.k8s.io "cluster-autoscaler-operator" created clusterrole.rbac.authorization.k8s.io "cluster-autoscaler" created clusterrole.rbac.authorization.k8s.io "cluster-autoscaler-operator" created rolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created rolebinding.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created rolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created configmap "cluster-autoscaler-operator-ca" created secret "cluster-autoscaler-operator-cert" created service "cluster-autoscaler-operator" created deployment.apps "cluster-autoscaler-operator" created + set +o xtrace ########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER AUTOSCALER OPERATOR [00h 00m 46s] ########## [workspace] $ /bin/bash /tmp/jenkins8316023624829877597.sh ########## STARTING STAGE: DEPLOY CLUSTER RESOURCES ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ 
VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.1mmZAONRdu + cat + chmod +x /tmp/tmp.1mmZAONRdu + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.1mmZAONRdu openshiftdevel:/tmp/tmp.1mmZAONRdu + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.1mmZAONRdu"' + cd /home/origin + export GOPATH=/data + GOPATH=/data + cd /data/src/github.com/openshift/cluster-api-provider-kubemark + sudo kubectl apply -f examples/machine-set.yaml machineset.machine.openshift.io "kubemark-actuator-testing-machineset" created + sudo kubectl apply -f examples/static-machine.yaml machine.machine.openshift.io "minikube-static-machine" created + sudo kubectl apply -f examples/worker-machinesets.yaml machineset.machine.openshift.io "kubemark-actuator-testing-machineset-red" created machineset.machine.openshift.io "kubemark-actuator-testing-machineset-green" created machineset.machine.openshift.io "kubemark-actuator-testing-machineset-blue" created + set +o xtrace ########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER RESOURCES [00h 00m 01s] ########## [workspace] $ /bin/bash /tmp/jenkins4551526806920267597.sh ########## STARTING STAGE: INSTALL GO 1.10.1 ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.njPLsEJAMo + cat + chmod +x /tmp/tmp.njPLsEJAMo + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.njPLsEJAMo openshiftdevel:/tmp/tmp.njPLsEJAMo + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.njPLsEJAMo"' + cd /home/origin + mkdir -p /home/origin/bin + curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme + chmod +x /home/origin/bin/gimme + gimme 1.10.1 unset GOOS; unset GOARCH; export 
GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64'; export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}"; go version >&2; export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env" + source /home/origin/.gimme/envs/go1.10.1.env ++ unset GOOS ++ unset GOARCH ++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64 ++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64 ++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin ++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin ++ go version go version go1.10.1 linux/amd64 + sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go + set +o xtrace ########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 07s] ########## [workspace] $ /bin/bash /tmp/jenkins3740955529149212768.sh ########## STARTING STAGE: RUN E2E TESTS ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.wZmg6rrmeP + cat + chmod +x /tmp/tmp.wZmg6rrmeP + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.wZmg6rrmeP openshiftdevel:/tmp/tmp.wZmg6rrmeP + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.wZmg6rrmeP"' + cd /home/origin + set +x go version go1.10.1 linux/amd64 # Run operator tests first to preserve logs for troubleshooting test # failures and flakes. # Feature:Operator tests remove deployments, thus losing all the logs # previously acquired.
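The comment above is why the stage runs the suite in two passes: the first invocation focuses on the operator specs so their deployment logs are captured intact, and the second skips them (plus TechPreview specs) with a pinned seed, as the output below shows. For a local rerun, the same two-phase split looks like this (a sketch assuming hack/ci-integration.sh simply forwards its flags to the compiled ginkgo test binary, which the go-test-style summary below suggests):

# Phase 1: only [Feature:Operators] specs, failing fast so broken operators
# surface before later specs tear down and recreate their deployments.
NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true \
  -ginkgo.focus "Feature:Operators" -ginkgo.failFast
# Phase 2: everything except operator and TechPreview specs; -ginkgo.seed=1
# pins the randomized spec order so runs are reproducible.
NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true \
  -ginkgo.skip "Feature:Operators|TechPreview" -ginkgo.failFast -ginkgo.seed=1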
NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" -ginkgo.failFast === RUN TestE2E Running Suite: Machine Suite ============================ Random Seed: 1567613289 Will run 7 of 16 specs [Feature:Operators] Cluster autoscaler operator should reject invalid ClusterAutoscaler resources early via webhook /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:33 I0904 16:08:09.497720 30809 framework.go:406] >>> kubeConfig: /root/.kube/config • ------------------------------ [Feature:Operators] Cluster autoscaler operator should reject invalid MachineAutoscaler resources early via webhook /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:49 I0904 16:08:09.557126 30809 framework.go:406] >>> kubeConfig: /root/.kube/config • ------------------------------ [Feature:Operators] Cluster autoscaler operator deployment should be available /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:79 I0904 16:08:09.576880 30809 framework.go:406] >>> kubeConfig: /root/.kube/config I0904 16:08:09.596491 30809 deloyment.go:58] Deployment "cluster-autoscaler-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) •S ------------------------------ [Feature:Operators] Machine API cluster operator status should be available /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:53 I0904 16:08:09.596582 30809 framework.go:406] >>> kubeConfig: /root/.kube/config • ------------------------------ [Feature:Operators] Cluster autoscaler cluster operator status should be available /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:90 I0904 16:08:09.608494 30809 framework.go:406] >>> kubeConfig: /root/.kube/config •SS ------------------------------ [Feature:Operators] Machine API operator deployment should be available /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:18 I0904 16:08:09.624195 30809 framework.go:406] >>> kubeConfig: /root/.kube/config I0904 16:08:09.635926 30809 deloyment.go:58] Deployment "machine-api-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) • ------------------------------ [Feature:Operators] Machine API operator deployment should reconcile controllers deployment /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:25 I0904 16:08:09.635978 30809 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: checking deployment "machine-api-controllers" is available I0904 16:08:09.652880 30809 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) STEP: deleting deployment "machine-api-controllers" STEP: checking deployment "machine-api-controllers" is available again E0904 16:08:09.660282 30809 deloyment.go:25] Error querying api for Deployment object "machine-api-controllers": deployments.apps "machine-api-controllers" not found, retrying... E0904 16:08:10.664898 30809 deloyment.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1) I0904 16:08:11.668245 30809 deloyment.go:58] Deployment "machine-api-controllers" is available. 
NAMESPACE=kube-system hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators|TechPreview" -ginkgo.failFast -ginkgo.seed=1
=== RUN TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1
Will run 7 of 16 specs
SSSSSSSS ------------------------------
[Feature:Machines] Autoscaler should scale up and down /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234
I0904 16:08:14.919232 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 16:08:14.924660 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 16:08:14.947914 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Getting existing machinesets
STEP: Getting existing machines
STEP: Getting existing nodes
I0904 16:08:14.960758 31343 autoscaler.go:286] Have 4 existing machinesets
I0904 16:08:14.960781 31343 autoscaler.go:287] Have 5 existing machines
I0904 16:08:14.960788 31343 autoscaler.go:288] Have 5 existing nodes
STEP: Creating 3 transient machinesets
STEP: [15m0s remaining] Waiting for nodes to be Ready in 3 transient machinesets
E0904 16:08:14.990278 31343 utils.go:157] Machine "e2e-336b0-w-0-vf6h5" has no NodeRef
STEP: [14m57s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0904 16:08:18.006539 31343 utils.go:165] Machine "e2e-336b0-w-0-vf6h5" is backing node "f04d1ba0-086a-426e-b543-563e6e04838a"
I0904 16:08:18.006564 31343 utils.go:149] MachineSet "e2e-336b0-w-0" have 1 nodes
E0904 16:08:18.011713 31343 utils.go:157] Machine "e2e-336b0-w-1-2f49v" has no NodeRef
STEP: [14m54s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0904 16:08:21.019773 31343 utils.go:165] Machine "e2e-336b0-w-0-vf6h5" is backing node "f04d1ba0-086a-426e-b543-563e6e04838a"
I0904 16:08:21.019804 31343 utils.go:149] MachineSet "e2e-336b0-w-0" have 1 nodes
I0904 16:08:21.025347 31343 utils.go:165] Machine "e2e-336b0-w-1-2f49v" is backing node "f3d83ee7-21fd-4ef5-bf29-5dfa22ea38f4"
I0904 16:08:21.025373 31343 utils.go:149] MachineSet "e2e-336b0-w-1" have 1 nodes
I0904 16:08:21.030509 31343 utils.go:165] Machine "e2e-336b0-w-2-chf54" is backing node "5944e33a-b080-44b2-b9c0-b4bee9b818e6"
I0904 16:08:21.030536 31343 utils.go:149] MachineSet "e2e-336b0-w-2" have 1 nodes
I0904 16:08:21.030583 31343 utils.go:177] Node "f04d1ba0-086a-426e-b543-563e6e04838a" is ready. Conditions are: [{OutOfDisk False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:17 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:17 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:17 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:17 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:17 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 16:08:21.030662 31343 utils.go:177] Node "f3d83ee7-21fd-4ef5-bf29-5dfa22ea38f4" is ready.
Conditions are: [{OutOfDisk False 2019-09-04 16:08:20 +0000 UTC 2019-09-04 16:08:18 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:08:20 +0000 UTC 2019-09-04 16:08:18 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:08:20 +0000 UTC 2019-09-04 16:08:18 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:08:20 +0000 UTC 2019-09-04 16:08:18 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:08:20 +0000 UTC 2019-09-04 16:08:18 +0000 UTC KubeletReady kubelet is posting ready status}] I0904 16:08:21.030706 31343 utils.go:177] Node "5944e33a-b080-44b2-b9c0-b4bee9b818e6" is ready. Conditions are: [{OutOfDisk False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:19 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:19 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:19 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:19 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:08:19 +0000 UTC 2019-09-04 16:08:19 +0000 UTC KubeletReady kubelet is posting ready status}] STEP: Getting nodes STEP: Creating 3 machineautoscalers I0904 16:08:21.033787 31343 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-336b0-w-0 - min:1, max:2 I0904 16:08:21.040195 31343 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-336b0-w-1 - min:1, max:2 I0904 16:08:21.043839 31343 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-336b0-w-2 - min:1, max:2 STEP: Creating ClusterAutoscaler configured with maxNodesTotal:10 STEP: Deriving Memory capacity from machine "kubemark-actuator-testing-machineset" I0904 16:08:21.158022 31343 autoscaler.go:377] Memory capacity of worker node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab" is 3840Mi STEP: Creating scale-out workload: jobs: 11, memory: 2818572300 I0904 16:08:21.183317 31343 autoscaler.go:399] [15m0s remaining] Expecting 2 "ScaledUpGroup" events; observed 0 I0904 16:08:23.013209 31343 autoscaler.go:361] cluster-autoscaler: cluster-autoscaler-default-598c649f66-p5mnf became leader I0904 16:08:24.183473 31343 autoscaler.go:399] [14m57s remaining] Expecting 2 "ScaledUpGroup" events; observed 0 I0904 16:08:27.183624 31343 autoscaler.go:399] [14m54s remaining] Expecting 2 "ScaledUpGroup" events; observed 0 I0904 16:08:30.183881 31343 autoscaler.go:399] [14m51s remaining] Expecting 2 "ScaledUpGroup" events; observed 0 I0904 16:08:33.172918 31343 autoscaler.go:361] cluster-autoscaler-status: Max total nodes in cluster reached: 10 I0904 16:08:33.175073 31343 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-336b0-w-1 size to 2 I0904 16:08:33.183536 31343 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-336b0-w-1 size set to 2 I0904 16:08:33.184608 31343 autoscaler.go:399] [14m48s remaining] Expecting 2 "ScaledUpGroup" events; observed 1 I0904 16:08:33.190835 31343 autoscaler.go:361] e2e-autoscaler-workload-c6k57: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.202568 31343 
autoscaler.go:361] e2e-autoscaler-workload-4pksh: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.206871 31343 autoscaler.go:361] e2e-autoscaler-workload-pxx5t: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.210784 31343 autoscaler.go:361] e2e-autoscaler-workload-cvv84: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.219791 31343 autoscaler.go:361] e2e-autoscaler-workload-b5qtw: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.229732 31343 autoscaler.go:361] e2e-autoscaler-workload-gv8nf: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.236764 31343 autoscaler.go:361] e2e-autoscaler-workload-q9zt6: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:33.372051 31343 autoscaler.go:361] e2e-autoscaler-workload-q8d7r: pod triggered scale-up: [{kube-system/e2e-336b0-w-1 1->2 (max: 2)}] I0904 16:08:36.184813 31343 autoscaler.go:399] [14m45s remaining] Expecting 2 "ScaledUpGroup" events; observed 1 I0904 16:08:39.185813 31343 autoscaler.go:399] [14m42s remaining] Expecting 2 "ScaledUpGroup" events; observed 1 I0904 16:08:42.186027 31343 autoscaler.go:399] [14m39s remaining] Expecting 2 "ScaledUpGroup" events; observed 1 I0904 16:08:43.210655 31343 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-336b0-w-0 size to 2 I0904 16:08:43.220194 31343 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-336b0-w-0 size set to 2 I0904 16:08:43.224832 31343 autoscaler.go:361] e2e-autoscaler-workload-q9zt6: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.231707 31343 autoscaler.go:361] e2e-autoscaler-workload-q8d7r: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.239883 31343 autoscaler.go:361] e2e-autoscaler-workload-4pksh: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.244711 31343 autoscaler.go:361] e2e-autoscaler-workload-pxx5t: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.247814 31343 autoscaler.go:361] e2e-autoscaler-workload-gv8nf: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.252787 31343 autoscaler.go:361] e2e-autoscaler-workload-cvv84: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:43.261014 31343 autoscaler.go:361] e2e-autoscaler-workload-c6k57: pod triggered scale-up: [{kube-system/e2e-336b0-w-0 1->2 (max: 2)}] I0904 16:08:45.186244 31343 autoscaler.go:399] [14m36s remaining] Expecting 2 "ScaledUpGroup" events; observed 2 I0904 16:08:45.187101 31343 autoscaler.go:414] [1m0s remaining] Waiting for cluster-autoscaler to generate a "MaxNodesTotalReached" event; observed 1 I0904 16:08:45.187133 31343 autoscaler.go:422] [1m0s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:08:48.187330 31343 autoscaler.go:422] [57s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:08:51.187548 31343 autoscaler.go:422] [54s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:08:54.187776 31343 autoscaler.go:422] [51s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:08:57.188037 31343 autoscaler.go:422] [48s 
remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:00.188281 31343 autoscaler.go:422] [45s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:03.189280 31343 autoscaler.go:422] [42s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:06.189561 31343 autoscaler.go:422] [39s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:09.189778 31343 autoscaler.go:422] [36s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:12.190023 31343 autoscaler.go:422] [33s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:15.190249 31343 autoscaler.go:422] [30s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:18.190464 31343 autoscaler.go:422] [27s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:21.190679 31343 autoscaler.go:422] [24s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:24.190915 31343 autoscaler.go:422] [21s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:27.191084 31343 autoscaler.go:422] [18s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:30.191341 31343 autoscaler.go:422] [15s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:33.191591 31343 autoscaler.go:422] [12s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:36.191898 31343 autoscaler.go:422] [9s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:39.192117 31343 autoscaler.go:422] [6s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 I0904 16:09:42.192360 31343 autoscaler.go:422] [3s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2 STEP: Deleting workload I0904 16:09:45.187291 31343 autoscaler.go:249] [cleanup] "e2e-autoscaler-workload" (*v1.Job) I0904 16:09:45.192630 31343 autoscaler.go:434] [15m0s remaining] Expecting 2 "ScaleDownEmpty" events; observed 2 I0904 16:09:45.210456 31343 autoscaler.go:445] still have workload POD: "e2e-autoscaler-workload-4pksh" I0904 16:09:45.210485 31343 autoscaler.go:249] [cleanup] "default" (*v1.ClusterAutoscaler) I0904 16:09:45.256709 31343 autoscaler.go:465] Waiting for cluster-autoscaler POD "cluster-autoscaler-default-598c649f66-p5mnf" to disappear STEP: Scaling transient machinesets to zero I0904 16:09:45.256748 31343 autoscaler.go:474] Scaling transient machineset "e2e-336b0-w-0" to zero I0904 16:09:45.266766 31343 autoscaler.go:474] Scaling transient machineset "e2e-336b0-w-1" to zero I0904 16:09:45.273896 31343 autoscaler.go:474] Scaling transient machineset "e2e-336b0-w-2" to zero STEP: Waiting for scaled up nodes to be deleted I0904 16:09:45.294687 31343 autoscaler.go:491] [15m0s remaining] Waiting for cluster to reach original node count of 5; currently have 10 I0904 16:09:48.297755 31343 autoscaler.go:491] [14m57s remaining] 
Waiting for cluster to reach original node count of 5; currently have 5 STEP: Waiting for scaled up machines to be deleted I0904 16:09:48.300969 31343 autoscaler.go:501] [15m0s remaining] Waiting for cluster to reach original machine count of 5; currently have 5 I0904 16:09:48.300997 31343 autoscaler.go:249] [cleanup] "e2e-336b0-w-0" (*v1beta1.MachineSet) I0904 16:09:48.304663 31343 autoscaler.go:249] [cleanup] "e2e-336b0-w-1" (*v1beta1.MachineSet) I0904 16:09:48.308589 31343 autoscaler.go:249] [cleanup] "e2e-336b0-w-2" (*v1beta1.MachineSet) I0904 16:09:48.313711 31343 autoscaler.go:249] [cleanup] "autoscale-e2e-336b0-w-0clsbx" (*v1beta1.MachineAutoscaler) I0904 16:09:48.323750 31343 autoscaler.go:249] [cleanup] "autoscale-e2e-336b0-w-1hj9gw" (*v1beta1.MachineAutoscaler) I0904 16:09:48.333624 31343 autoscaler.go:249] [cleanup] "autoscale-e2e-336b0-w-2zpc4r" (*v1beta1.MachineAutoscaler) • [SLOW TEST:93.420 seconds] [Feature:Machines] Autoscaler should /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:233 scale up and down /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234 ------------------------------ S ------------------------------ [Feature:Machines] Managed cluster should have machines linked with nodes /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:136 I0904 16:09:48.339487 31343 framework.go:406] >>> kubeConfig: /root/.kube/config I0904 16:09:48.359164 31343 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0904 16:09:48.359198 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is linked to node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" I0904 16:09:48.359216 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-zrxlb" is linked to node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" I0904 16:09:48.359230 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-lhs6r" is linked to node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab" I0904 16:09:48.359244 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-566rv" is linked to node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" I0904 16:09:48.359258 31343 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube" • ------------------------------ [Feature:Machines] Managed cluster should have ability to additively reconcile taints from machine to nodes /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:145 I0904 16:09:48.359317 31343 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: getting machine "kubemark-actuator-testing-machineset-blue-8mk44" I0904 16:09:48.384682 31343 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is backing node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" STEP: getting the backed node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" STEP: updating node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" with taint: {not-from-machine true NoSchedule } STEP: updating machine "kubemark-actuator-testing-machineset-blue-8mk44" with taint: {from-machine-6b1b07e3-cf2e-11e9-82ed-0a1de5b610ea true NoSchedule } I0904 16:09:48.395004 31343 infra.go:184] Getting node from machine again for verification of taints I0904 16:09:48.401828 31343 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is backing node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" I0904 16:09:48.401857 
31343 infra.go:194] Expected : map[from-machine-6b1b07e3-cf2e-11e9-82ed-0a1de5b610ea:{} not-from-machine:{}], observed map[kubemark:{} not-from-machine:{} from-machine-6b1b07e3-cf2e-11e9-82ed-0a1de5b610ea:{}] , difference map[], STEP: Getting the latest version of the original machine STEP: Setting back the original machine taints STEP: Getting the latest version of the node I0904 16:09:48.409444 31343 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is backing node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" STEP: Setting back the original node taints • ------------------------------ [Feature:Machines] Managed cluster should recover from deleted worker machines /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220 I0904 16:09:48.414743 31343 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: checking initial cluster state I0904 16:09:48.440213 31343 utils.go:87] Cluster size is 5 nodes I0904 16:09:48.440250 31343 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes I0904 16:09:48.444434 31343 utils.go:99] MachineSet "e2e-336b0-w-0" replicas 0. Ready: 0, available 0 I0904 16:09:48.444462 31343 utils.go:99] MachineSet "e2e-336b0-w-1" replicas 0. Ready: 0, available 0 I0904 16:09:48.444472 31343 utils.go:99] MachineSet "e2e-336b0-w-2" replicas 0. Ready: 0, available 0 I0904 16:09:48.444480 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:09:48.444489 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:09:48.444498 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:09:48.444513 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:09:48.447505 31343 utils.go:231] Node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab". Ready: true. Unschedulable: false I0904 16:09:48.447526 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:09:48.447536 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:09:48.447542 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:09:48.447589 31343 utils.go:231] Node "minikube". Ready: true. 
Unschedulable: false
I0904 16:09:48.452880 31343 utils.go:87] Cluster size is 5 nodes
I0904 16:09:48.452905 31343 utils.go:257] waiting for all nodes to be ready
I0904 16:09:48.459598 31343 utils.go:262] waiting for all nodes to be schedulable
I0904 16:09:48.466681 31343 utils.go:290] [remaining 1m0s] Node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab" is schedulable
I0904 16:09:48.466716 31343 utils.go:290] [remaining 1m0s] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" is schedulable
I0904 16:09:48.466727 31343 utils.go:290] [remaining 1m0s] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" is schedulable
I0904 16:09:48.466738 31343 utils.go:290] [remaining 1m0s] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" is schedulable
I0904 16:09:48.466747 31343 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 16:09:48.466756 31343 utils.go:267] waiting for each node to be backed by a machine
I0904 16:09:48.478834 31343 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 16:09:48.478873 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is linked to node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c"
I0904 16:09:48.478884 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-zrxlb" is linked to node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da"
I0904 16:09:48.478892 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-lhs6r" is linked to node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab"
I0904 16:09:48.478901 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-566rv" is linked to node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1"
I0904 16:09:48.478909 31343 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
STEP: getting worker node
STEP: deleting machine object "kubemark-actuator-testing-machineset-lhs6r"
STEP: waiting for node object "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab" to go away
I0904 16:09:48.501172 31343 infra.go:255] Node "1683dbc1-25e9-424c-94c0-5f6e2f3b5bab" still exists. Node conditions are: [{OutOfDisk False 2019-09-04 16:09:48 +0000 UTC 2019-09-04 16:07:23 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:09:48 +0000 UTC 2019-09-04 16:07:23 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:09:48 +0000 UTC 2019-09-04 16:07:23 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:09:48 +0000 UTC 2019-09-04 16:07:23 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:09:48 +0000 UTC 2019-09-04 16:07:23 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: waiting for new node object to come up
I0904 16:09:53.506286 31343 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0904 16:09:53.509266 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 16:09:53.509286 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 16:09:53.509292 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 16:09:53.509298 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
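The recovery spec above deletes a Machine out from under its MachineSet and waits for the controller to create a replacement (the new machine "kubemark-actuator-testing-machineset-zml5d" appears in the linkage checks just below). A sketch of the deletion step using client-go's dynamic client, assuming the machine.openshift.io/v1beta1 API group this era of the Machine API served; the helper name is hypothetical and the arguments just echo the log:

```go
// Hypothetical sketch: delete one Machine and let its MachineSet reconcile
// the replica count back up, as the spec in the log does.
package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

var machineGVR = schema.GroupVersionResource{
	Group:    "machine.openshift.io",
	Version:  "v1beta1",
	Resource: "machines",
}

func deleteWorkerMachine(cfg *rest.Config, namespace, name string) error {
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return err
	}
	// The MachineSet controller observes the missing replica and creates a
	// replacement Machine; the suite then polls until node and machine
	// counts settle back at the original cluster size.
	return dyn.Resource(machineGVR).Namespace(namespace).
		Delete(context.TODO(), name, metav1.DeleteOptions{})
}
```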
I0904 16:09:53.511880 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false
I0904 16:09:53.511900 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false
I0904 16:09:53.511906 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false
I0904 16:09:53.511911 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false
I0904 16:09:53.511916 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 16:09:53.514496 31343 utils.go:87] Cluster size is 5 nodes
I0904 16:09:53.514515 31343 utils.go:257] waiting for all nodes to be ready
I0904 16:09:53.516985 31343 utils.go:262] waiting for all nodes to be schedulable
I0904 16:09:53.519703 31343 utils.go:290] [remaining 1m0s] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" is schedulable
I0904 16:09:53.519726 31343 utils.go:290] [remaining 1m0s] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" is schedulable
I0904 16:09:53.519733 31343 utils.go:290] [remaining 1m0s] Node "80076895-9615-40cc-9535-18c66ad0cef2" is schedulable
I0904 16:09:53.519741 31343 utils.go:290] [remaining 1m0s] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" is schedulable
I0904 16:09:53.519752 31343 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 16:09:53.519760 31343 utils.go:267] waiting for each node to be backed by a machine
I0904 16:09:53.525171 31343 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 16:09:53.525198 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is linked to node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c"
I0904 16:09:53.525209 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-zrxlb" is linked to node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da"
I0904 16:09:53.525217 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-566rv" is linked to node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1"
I0904 16:09:53.525229 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-zml5d" is linked to node "80076895-9615-40cc-9535-18c66ad0cef2"
I0904 16:09:53.525250 31343 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
• [SLOW TEST:5.111 seconds]
[Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
recover from deleted worker machines /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
------------------------------
[Feature:Machines] Managed cluster should grow and decrease when scaling different machineSets simultaneously /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
I0904 16:09:53.525367 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0904 16:09:53.540849 31343 utils.go:87] Cluster size is 5 nodes
STEP: getting worker machineSets
I0904 16:09:53.543748 31343 infra.go:297] Creating transient MachineSet "e2e-6e2d9-w-0"
I0904 16:09:53.547755 31343 infra.go:297] Creating transient MachineSet "e2e-6e2d9-w-1"
STEP: scaling "e2e-6e2d9-w-0" from 0 to 2 replicas
I0904 16:09:53.552225 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-6e2d9-w-1" from 0 to 2 replicas
I0904 16:09:53.575402 31343 framework.go:406] >>> kubeConfig:
/root/.kube/config E0904 16:09:53.617056 31343 utils.go:157] Machine "e2e-6e2d9-w-0-2hfhb" has no NodeRef I0904 16:09:58.629883 31343 utils.go:165] Machine "e2e-6e2d9-w-0-2hfhb" is backing node "5990abf0-6713-4894-b1a5-b644f715c0d3" I0904 16:09:58.633382 31343 utils.go:165] Machine "e2e-6e2d9-w-0-6mp9h" is backing node "f0146bf9-e4da-4b54-b814-e8810d959059" I0904 16:09:58.633408 31343 utils.go:149] MachineSet "e2e-6e2d9-w-0" have 2 nodes I0904 16:09:58.645108 31343 utils.go:165] Machine "e2e-6e2d9-w-1-5swwn" is backing node "a1fdacb6-1a18-4da2-beec-a85047ca34c9" I0904 16:09:58.649647 31343 utils.go:165] Machine "e2e-6e2d9-w-1-z7vkt" is backing node "0760bafd-0d42-4313-905a-b8cecc50b160" I0904 16:09:58.649668 31343 utils.go:149] MachineSet "e2e-6e2d9-w-1" have 2 nodes I0904 16:09:58.649679 31343 utils.go:177] Node "5990abf0-6713-4894-b1a5-b644f715c0d3" is ready. Conditions are: [{OutOfDisk False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletReady kubelet is posting ready status}] I0904 16:09:58.649730 31343 utils.go:177] Node "f0146bf9-e4da-4b54-b814-e8810d959059" is ready. Conditions are: [{OutOfDisk False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:56 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:56 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:56 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:56 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:56 +0000 UTC KubeletReady kubelet is posting ready status}] I0904 16:09:58.649769 31343 utils.go:177] Node "a1fdacb6-1a18-4da2-beec-a85047ca34c9" is ready. Conditions are: [{OutOfDisk False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:09:57 +0000 UTC 2019-09-04 16:09:57 +0000 UTC KubeletReady kubelet is posting ready status}] I0904 16:09:58.649804 31343 utils.go:177] Node "0760bafd-0d42-4313-905a-b8cecc50b160" is ready. 
Conditions are: [{OutOfDisk False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:58 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:58 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:58 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:58 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 16:09:58 +0000 UTC 2019-09-04 16:09:58 +0000 UTC KubeletReady kubelet is posting ready status}] STEP: scaling "e2e-6e2d9-w-0" from 2 to 0 replicas I0904 16:09:58.649851 31343 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: scaling "e2e-6e2d9-w-1" from 2 to 0 replicas I0904 16:09:58.670220 31343 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: waiting for cluster to get back to original size. Final size should be 5 nodes I0904 16:09:58.703731 31343 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes I0904 16:09:58.712061 31343 utils.go:99] MachineSet "e2e-6e2d9-w-0" replicas 0. Ready: 0, available 0 I0904 16:09:58.712086 31343 utils.go:99] MachineSet "e2e-6e2d9-w-1" replicas 0. Ready: 2, available 2 I0904 16:09:58.712096 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:09:58.712106 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:09:58.712115 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:09:58.712125 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:09:58.718729 31343 utils.go:231] Node "0760bafd-0d42-4313-905a-b8cecc50b160". Ready: true. Unschedulable: false I0904 16:09:58.718753 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:09:58.718762 31343 utils.go:231] Node "5990abf0-6713-4894-b1a5-b644f715c0d3". Ready: true. Unschedulable: true I0904 16:09:58.718770 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:09:58.718779 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:09:58.718787 31343 utils.go:231] Node "a1fdacb6-1a18-4da2-beec-a85047ca34c9". Ready: true. Unschedulable: false I0904 16:09:58.718799 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:09:58.718808 31343 utils.go:231] Node "f0146bf9-e4da-4b54-b814-e8810d959059". Ready: true. Unschedulable: false I0904 16:09:58.718818 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false I0904 16:09:58.724720 31343 utils.go:87] Cluster size is 9 nodes I0904 16:10:03.724952 31343 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes I0904 16:10:03.730269 31343 utils.go:99] MachineSet "e2e-6e2d9-w-0" replicas 0. Ready: 0, available 0 I0904 16:10:03.730294 31343 utils.go:99] MachineSet "e2e-6e2d9-w-1" replicas 0. Ready: 0, available 0 I0904 16:10:03.730304 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:10:03.730313 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. 
Ready: 1, available 1
I0904 16:10:03.730321 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 16:10:03.730331 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 16:10:03.733078 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false
I0904 16:10:03.733103 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false
I0904 16:10:03.733112 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false
I0904 16:10:03.733120 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false
I0904 16:10:03.733128 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 16:10:03.735744 31343 utils.go:87] Cluster size is 5 nodes
I0904 16:10:03.735776 31343 utils.go:257] waiting for all nodes to be ready
I0904 16:10:03.738335 31343 utils.go:262] waiting for all nodes to be schedulable
I0904 16:10:03.740803 31343 utils.go:290] [remaining 1m0s] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" is schedulable
I0904 16:10:03.740831 31343 utils.go:290] [remaining 1m0s] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" is schedulable
I0904 16:10:03.740842 31343 utils.go:290] [remaining 1m0s] Node "80076895-9615-40cc-9535-18c66ad0cef2" is schedulable
I0904 16:10:03.740853 31343 utils.go:290] [remaining 1m0s] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" is schedulable
I0904 16:10:03.740862 31343 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 16:10:03.740871 31343 utils.go:267] waiting for each node to be backed by a machine
I0904 16:10:03.747154 31343 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 16:10:03.747181 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is linked to node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c"
I0904 16:10:03.747196 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-zrxlb" is linked to node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da"
I0904 16:10:03.747204 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-566rv" is linked to node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1"
I0904 16:10:03.747212 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-zml5d" is linked to node "80076895-9615-40cc-9535-18c66ad0cef2"
I0904 16:10:03.747221 31343 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
• [SLOW TEST:10.229 seconds]
[Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
grow and decrease when scaling different machineSets simultaneously /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
------------------------------
[Feature:Machines] Managed cluster should drain node before removing machine resource /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
I0904 16:10:03.754615 31343 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0904 16:10:03.769480 31343 utils.go:87] Cluster size is 5 nodes
STEP: Taking the first worker machineset (assuming only worker machines are backed by machinesets)
STEP: Creating two new machines, one for node about to be drained, other for moving workload from drained node
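The drain spec that starts here creates a 20-replica ReplicationController plus a PodDisruptionBudget over it before deleting the machine, so the node drain must evict pods while honoring disruption limits (the RC and PDB creation steps appear just below). A sketch of such a PDB in Go, using the policy/v1beta1 API current for this log's vintage; the pod selector echoes the "app": "nginx" label in the pod dumps below, while the minAvailable value is an assumption since the log does not show it:

```go
// Hypothetical PDB covering the pdb-workload pods seen in the dumps below;
// not necessarily the values the suite actually uses.
package e2e

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func newPDB(namespace string) *policyv1beta1.PodDisruptionBudget {
	// Assumed floor: keep at least one workload pod available during the drain.
	minAvailable := intstr.FromInt(1)
	return &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "pdb-workload", Namespace: namespace},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "nginx"},
			},
		},
	}
}
```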
STEP: Waiting until both new nodes are ready
E0904 16:10:03.783175 31343 utils.go:342] [remaining 15m0s] Expecting 2 nodes with map[string]string{"node-role.kubernetes.io/worker":"", "node-draining-test":"33644a25-cf2e-11e9-82ed-0a1de5b610ea"} labels in Ready state, got 0
I0904 16:10:08.787450 31343 utils.go:346] [14m55s remaining] Expected number (2) of nodes with map[node-role.kubernetes.io/worker: node-draining-test:33644a25-cf2e-11e9-82ed-0a1de5b610ea] label in Ready state found
STEP: Creating RC with workload
STEP: Creating PDB for RC
STEP: Wait until all replicas are ready
I0904 16:10:08.820431 31343 utils.go:396] [15m0s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 0
I0904 16:10:13.822699 31343 utils.go:396] [14m55s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 20
I0904 16:10:18.828453 31343 utils.go:399] [14m50s remaining] Waiting for RC ready replicas, ReadyReplicas: 20, Replicas: 20
I0904 16:10:18.840783 31343 utils.go:416] POD #0/20: { "metadata": { "name": "pdb-workload-44skz", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-44skz", "uid": "774ce4a1-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3629", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.1.87.163", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image":
"busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://659ad13e9da9b149" } ], "qosClass": "Burstable" } } I0904 16:10:18.840960 31343 utils.go:416] POD #1/20: { "metadata": { "name": "pdb-workload-562sf", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-562sf", "uid": "77483249-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3614", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.96.142.133", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://9eb5b14bdaf3b81f" } ], "qosClass": "Burstable" } } I0904 16:10:18.841130 31343 utils.go:416] POD #2/20: { "metadata": { "name": "pdb-workload-8hkkh", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-8hkkh", "uid": "7749ed79-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3645", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", 
"image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.150.2.250", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://30cb9364cce051bf" } ], "qosClass": "Burstable" } } I0904 16:10:18.841293 31343 utils.go:416] POD #3/20: { "metadata": { "name": "pdb-workload-8mk8g", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-8mk8g", "uid": "77482348-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3651", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": 
"Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.83.140.22", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://cfa38c985545536b" } ], "qosClass": "Burstable" } } I0904 16:10:18.841465 31343 utils.go:416] POD #4/20: { "metadata": { "name": "pdb-workload-b64kw", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-b64kw", "uid": "7749ff9e-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3636", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.16.112.169", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:14Z" } }, 
"lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://94c0b5da7b038274" } ], "qosClass": "Burstable" } } I0904 16:10:18.841652 31343 utils.go:416] POD #5/20: { "metadata": { "name": "pdb-workload-d9hmr", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-d9hmr", "uid": "7749a9d6-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3617", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.4.23.125", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://3a5b9ebf8625c556" } ], "qosClass": "Burstable" } } I0904 16:10:18.841824 31343 utils.go:416] POD #6/20: { "metadata": { "name": "pdb-workload-dpdhc", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-dpdhc", "uid": "774839fa-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3639", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", 
"defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.130.209.46", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://8cbaa31cf930da75" } ], "qosClass": "Burstable" } } I0904 16:10:18.841991 31343 utils.go:416] POD #7/20: { "metadata": { "name": "pdb-workload-fxh4d", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-fxh4d", "uid": "774c9727-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3626", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": 
"Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.120.196.208", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:14Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://131055c35f161e1f" } ], "qosClass": "Burstable" } } I0904 16:10:18.842156 31343 utils.go:416] POD #8/20: { "metadata": { "name": "pdb-workload-llfcf", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-llfcf", "uid": "7746da41-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3661", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.37.230.204", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": 
"work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://51fd727defdc7440" } ], "qosClass": "Burstable" } } I0904 16:10:18.842325 31343 utils.go:416] POD #9/20: { "metadata": { "name": "pdb-workload-lxjdl", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-lxjdl", "uid": "7749feb9-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3623", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.106.236.16", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://76aabfeee9f550f2" } ], "qosClass": "Burstable" } } I0904 16:10:18.842512 31343 utils.go:416] POD #10/20: { "metadata": { "name": "pdb-workload-nljcz", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-nljcz", "uid": "7746312f-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3653", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": 
"default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.138.192.35", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://c619ba0844a858c6" } ], "qosClass": "Burstable" } } I0904 16:10:18.842697 31343 utils.go:416] POD #11/20: { "metadata": { "name": "pdb-workload-qh4qm", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-qh4qm", "uid": "7749eeab-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3611", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": 
"default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.124.34.189", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://11b83cab86be9fce" } ], "qosClass": "Burstable" } } I0904 16:10:18.842865 31343 utils.go:416] POD #12/20: { "metadata": { "name": "pdb-workload-qr8vf", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-qr8vf", "uid": "774ccfe9-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3646", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.210.60.249", 
"startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://7d22044ec73d37b6" } ], "qosClass": "Burstable" } } I0904 16:10:18.843015 31343 utils.go:416] POD #13/20: { "metadata": { "name": "pdb-workload-tdckx", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-tdckx", "uid": "774cbaf9-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3647", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.3.79.215", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:15Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://309ee52824ebd9bc" } ], "qosClass": "Burstable" } } I0904 16:10:18.843175 31343 utils.go:416] POD #14/20: { "metadata": { "name": "pdb-workload-v957b", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-v957b", "uid": "7746cb15-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3605", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, 
"blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.145.44.67", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:12Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://dafc656e30328bd5" } ], "qosClass": "Burstable" } } I0904 16:10:18.843361 31343 utils.go:416] POD #15/20: { "metadata": { "name": "pdb-workload-w7d6c", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-w7d6c", "uid": "774ccaf5-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3608", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": 
"e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.253.106.182", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:14Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://9ce1f3549383af64" } ], "qosClass": "Burstable" } } I0904 16:10:18.843524 31343 utils.go:416] POD #16/20: { "metadata": { "name": "pdb-workload-x8bpj", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-x8bpj", "uid": "7749ebc7-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3658", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": 
"2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.75.58.116", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:14Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://199f9e0605216c31" } ], "qosClass": "Burstable" } } I0904 16:10:18.843845 31343 utils.go:416] POD #17/20: { "metadata": { "name": "pdb-workload-xvg24", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-xvg24", "uid": "774822b7-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3620", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "e2b95e4f-f485-4be2-b168-06db1dc1c517", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:14Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.23", "podIP": "10.63.172.5", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://56dbe2ec92b178b" } ], "qosClass": "Burstable" } } I0904 16:10:18.844012 31343 utils.go:416] POD #18/20: { "metadata": { "name": "pdb-workload-z5x5d", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-z5x5d", "uid": "7749fdb9-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3633", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": 
"pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, "serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.111.64.5", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:13Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://a6cec783c297c851" } ], "qosClass": "Burstable" } } I0904 16:10:18.844167 31343 utils.go:416] POD #19/20: { "metadata": { "name": "pdb-workload-zsjsn", "generateName": "pdb-workload-", "namespace": "default", "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-zsjsn", "uid": "7749efff-cf2e-11e9-b208-0a1de5b610ea", "resourceVersion": "3642", "creationTimestamp": "2019-09-04T16:10:08Z", "labels": { "app": "nginx" }, "ownerReferences": [ { "apiVersion": "v1", "kind": "ReplicationController", "name": "pdb-workload", "uid": "7744bf9c-cf2e-11e9-b208-0a1de5b610ea", "controller": true, "blockOwnerDeletion": true } ] }, "spec": { "volumes": [ { "name": "default-token-8gnqs", "secret": { "secretName": "default-token-8gnqs", "defaultMode": 420 } } ], "containers": [ { "name": "work", "image": "busybox", "command": [ "sleep", "10h" ], "resources": { "requests": { "cpu": "50m", "memory": "50Mi" } }, "volumeMounts": [ { "name": "default-token-8gnqs", "readOnly": true, "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount" } ], "terminationMessagePath": "/dev/termination-log", "terminationMessagePolicy": "File", "imagePullPolicy": "Always" } ], "restartPolicy": "Always", "terminationGracePeriodSeconds": 30, "dnsPolicy": "ClusterFirst", "nodeSelector": { "node-draining-test": "33644a25-cf2e-11e9-82ed-0a1de5b610ea", "node-role.kubernetes.io/worker": "" }, 
"serviceAccountName": "default", "serviceAccount": "default", "nodeName": "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", "securityContext": {}, "schedulerName": "default-scheduler", "tolerations": [ { "key": "kubemark", "operator": "Exists" }, { "key": "node.kubernetes.io/not-ready", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 }, { "key": "node.kubernetes.io/unreachable", "operator": "Exists", "effect": "NoExecute", "tolerationSeconds": 300 } ], "priority": 0 }, "status": { "phase": "Running", "conditions": [ { "type": "Initialized", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" }, { "type": "Ready", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:15Z" }, { "type": "ContainersReady", "status": "True", "lastProbeTime": null, "lastTransitionTime": null }, { "type": "PodScheduled", "status": "True", "lastProbeTime": null, "lastTransitionTime": "2019-09-04T16:10:08Z" } ], "hostIP": "172.17.0.22", "podIP": "10.203.226.194", "startTime": "2019-09-04T16:10:08Z", "containerStatuses": [ { "name": "work", "state": { "running": { "startedAt": "2019-09-04T16:10:14Z" } }, "lastState": {}, "ready": true, "restartCount": 0, "image": "busybox:latest", "imageID": "docker://busybox:latest", "containerID": "docker://7f97173b9c4f94c1" } ], "qosClass": "Burstable" } } STEP: Delete machine to trigger node draining STEP: Observing and verifying node draining E0904 16:10:18.865119 31343 utils.go:451] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is expected to be marked as unschedulable, it is not I0904 16:10:23.869428 31343 utils.go:455] [remaining 14m55s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:23.876526 31343 utils.go:474] [remaining 14m55s] Have 9 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:23.878510 31343 utils.go:490] [remaining 14m55s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:23.878534 31343 utils.go:500] [remaining 14m55s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 9 I0904 16:10:28.869758 31343 utils.go:455] [remaining 14m50s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:28.879334 31343 utils.go:474] [remaining 14m50s] Have 8 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:28.882202 31343 utils.go:490] [remaining 14m50s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:28.882238 31343 utils.go:500] [remaining 14m50s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 8 I0904 16:10:33.870502 31343 utils.go:455] [remaining 14m45s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:33.877698 31343 utils.go:474] [remaining 14m45s] Have 7 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:33.879315 31343 utils.go:490] [remaining 14m45s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:33.879348 31343 utils.go:500] [remaining 14m45s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 7 I0904 16:10:38.868856 31343 utils.go:455] [remaining 14m40s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:38.875034 31343 utils.go:474] [remaining 14m40s] Have 6 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:38.876632 31343 utils.go:490] [remaining 14m40s] RC ReadyReplicas: 20, Replicas: 20 I0904 
16:10:38.876657 31343 utils.go:500] [remaining 14m40s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 6 I0904 16:10:43.870005 31343 utils.go:455] [remaining 14m35s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:43.876984 31343 utils.go:474] [remaining 14m35s] Have 5 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:43.878526 31343 utils.go:490] [remaining 14m35s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:43.878551 31343 utils.go:500] [remaining 14m35s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 5 I0904 16:10:48.870107 31343 utils.go:455] [remaining 14m30s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:48.876600 31343 utils.go:474] [remaining 14m30s] Have 4 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:48.878988 31343 utils.go:490] [remaining 14m30s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:48.879016 31343 utils.go:500] [remaining 14m30s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 4 I0904 16:10:53.869350 31343 utils.go:455] [remaining 14m25s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:53.875312 31343 utils.go:474] [remaining 14m25s] Have 3 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:53.877048 31343 utils.go:490] [remaining 14m25s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:53.877073 31343 utils.go:500] [remaining 14m25s] Expecting at most 2 pods to be scheduled to drained node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36", got 3 I0904 16:10:58.869061 31343 utils.go:455] [remaining 14m20s] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" is mark unschedulable as expected I0904 16:10:58.875170 31343 utils.go:474] [remaining 14m20s] Have 2 pods scheduled to node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" I0904 16:10:58.876753 31343 utils.go:490] [remaining 14m20s] RC ReadyReplicas: 20, Replicas: 20 I0904 16:10:58.876777 31343 utils.go:504] [remaining 14m20s] Expected result: all pods from the RC up to last one or two got scheduled to a different node while respecting PDB STEP: Validating the machine is deleted E0904 16:10:58.878370 31343 infra.go:454] Machine "machine1" not yet deleted E0904 16:11:03.880592 31343 infra.go:454] Machine "machine1" not yet deleted I0904 16:11:08.880681 31343 infra.go:463] Machine "machine1" successfully deleted STEP: Validate underlying node corresponding to machine1 is removed as well I0904 16:11:08.882127 31343 utils.go:530] [15m0s remaining] Node "dd7372de-7bf8-4b42-b9aa-a32b9f251e36" successfully deleted STEP: Delete PDB STEP: Delete machine2 STEP: waiting for cluster to get back to original size. Final size should be 5 nodes I0904 16:11:08.889608 31343 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes I0904 16:11:08.893269 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:11:08.893293 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:11:08.893303 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:11:08.893312 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. 
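
[Editor's note on the drain verification polled above (utils.go:455/474/500): deleting the machine is what cordons and drains its node; the helper then repeatedly checks that the node stays unschedulable and that the pod count on it falls to at most 2, while the PDB-guarded ReplicationController holds ReadyReplicas at 20. A rough kubectl equivalent of that check, as a sketch only (NODE is the drained node from this run; the suite's real implementation is the Go helper in utils.go):

  # Wait for the drained node to be marked unschedulable (cordoned).
  NODE="dd7372de-7bf8-4b42-b9aa-a32b9f251e36"
  until [ "$(kubectl get node "$NODE" -o jsonpath='{.spec.unschedulable}')" = "true" ]; do
    sleep 5
  done
  # Count the pods still scheduled there; the test tolerates at most 2
  # stragglers while the RC keeps all 20 replicas ready (PDB respected).
  kubectl get pods --all-namespaces --field-selector "spec.nodeName=$NODE" --no-headers | wc -l
]
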
Ready: 1, available 1 I0904 16:11:08.896790 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:11:08.896815 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:11:08.896824 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:11:08.896832 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:11:08.896840 31343 utils.go:231] Node "e2b95e4f-f485-4be2-b168-06db1dc1c517". Ready: true. Unschedulable: false I0904 16:11:08.896848 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false I0904 16:11:08.901776 31343 utils.go:87] Cluster size is 6 nodes I0904 16:11:13.902038 31343 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes I0904 16:11:13.905033 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:11:13.905055 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:11:13.905061 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:11:13.905067 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:11:13.908030 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:11:13.908055 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:11:13.908064 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:11:13.908072 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:11:13.908080 31343 utils.go:231] Node "e2b95e4f-f485-4be2-b168-06db1dc1c517". Ready: true. Unschedulable: true I0904 16:11:13.908088 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false I0904 16:11:13.911405 31343 utils.go:87] Cluster size is 6 nodes I0904 16:11:18.902023 31343 utils.go:239] [remaining 14m50s] Cluster size expected to be 5 nodes I0904 16:11:18.905106 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:11:18.905128 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:11:18.905134 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:11:18.905140 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:11:18.907989 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:11:18.908010 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:11:18.908016 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:11:18.908021 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:11:18.908026 31343 utils.go:231] Node "e2b95e4f-f485-4be2-b168-06db1dc1c517". Ready: true. Unschedulable: true I0904 16:11:18.908031 31343 utils.go:231] Node "minikube". Ready: true. 
Unschedulable: false I0904 16:11:18.910742 31343 utils.go:87] Cluster size is 6 nodes I0904 16:11:23.902003 31343 utils.go:239] [remaining 14m45s] Cluster size expected to be 5 nodes I0904 16:11:23.905033 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:11:23.905055 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:11:23.905061 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:11:23.905066 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:11:23.908028 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:11:23.908049 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:11:23.908056 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:11:23.908061 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:11:23.908066 31343 utils.go:231] Node "e2b95e4f-f485-4be2-b168-06db1dc1c517". Ready: true. Unschedulable: true I0904 16:11:23.908071 31343 utils.go:231] Node "minikube". Ready: true. Unschedulable: false I0904 16:11:23.910849 31343 utils.go:87] Cluster size is 6 nodes I0904 16:11:28.902028 31343 utils.go:239] [remaining 14m40s] Cluster size expected to be 5 nodes I0904 16:11:28.904909 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0904 16:11:28.904934 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0904 16:11:28.904943 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0904 16:11:28.904952 31343 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0904 16:11:28.907621 31343 utils.go:231] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1". Ready: true. Unschedulable: false I0904 16:11:28.907641 31343 utils.go:231] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da". Ready: true. Unschedulable: false I0904 16:11:28.907646 31343 utils.go:231] Node "80076895-9615-40cc-9535-18c66ad0cef2". Ready: true. Unschedulable: false I0904 16:11:28.907651 31343 utils.go:231] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c". Ready: true. Unschedulable: false I0904 16:11:28.907656 31343 utils.go:231] Node "minikube". Ready: true. 
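
[Editor's note on the size wait looping above (utils.go:239 and utils.go:87): it is a plain node count against the expected final size, followed by readiness, schedulability, and machine-to-node linkage checks once the count matches. Approximately, with kubectl (a sketch; `kubectl get machines` assumes the machine.openshift.io CRDs this cluster serves):

  EXPECTED=5
  until [ "$(kubectl get nodes --no-headers | wc -l)" -eq "$EXPECTED" ]; do
    sleep 5   # cluster still converging back to its original size
  done
  # Every node Ready and schedulable...
  kubectl wait --for=condition=Ready node --all --timeout=60s
  # ...and each machine linked to a node via .status.nodeRef:
  kubectl get machines --all-namespaces \
    -o jsonpath='{range .items[*]}{.metadata.name}{" -> "}{.status.nodeRef.name}{"\n"}{end}'
]
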
Unschedulable: false I0904 16:11:28.910148 31343 utils.go:87] Cluster size is 5 nodes I0904 16:11:28.910176 31343 utils.go:257] waiting for all nodes to be ready I0904 16:11:28.912858 31343 utils.go:262] waiting for all nodes to be schedulable I0904 16:11:28.915441 31343 utils.go:290] [remaining 1m0s] Node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" is schedulable I0904 16:11:28.915473 31343 utils.go:290] [remaining 1m0s] Node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" is schedulable I0904 16:11:28.915485 31343 utils.go:290] [remaining 1m0s] Node "80076895-9615-40cc-9535-18c66ad0cef2" is schedulable I0904 16:11:28.915495 31343 utils.go:290] [remaining 1m0s] Node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" is schedulable I0904 16:11:28.915505 31343 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable I0904 16:11:28.915520 31343 utils.go:267] waiting for each node to be backed by a machine I0904 16:11:28.922648 31343 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0904 16:11:28.922677 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-8mk44" is linked to node "ddd54ef8-8cf1-4b6c-bf00-32919eabf62c" I0904 16:11:28.922693 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-zrxlb" is linked to node "7ef1e50f-8bcf-499a-afb4-fc2aca1370da" I0904 16:11:28.922707 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-566rv" is linked to node "3f04fdee-11f3-437a-ba46-09f9e9ced5e1" I0904 16:11:28.922729 31343 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-zml5d" is linked to node "80076895-9615-40cc-9535-18c66ad0cef2" I0904 16:11:28.922742 31343 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube" I0904 16:11:28.934139 31343 utils.go:378] [15m0s remaining] Found 0 number of nodes with map[node-role.kubernetes.io/worker: node-draining-test:33644a25-cf2e-11e9-82ed-0a1de5b610ea] label as expected • [SLOW TEST:85.180 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126 drain node before removing machine resource /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346 ------------------------------ [Feature:Machines] Managed cluster should reject invalid machinesets /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:487 I0904 16:11:28.934257 31343 framework.go:406] >>> kubeConfig: /root/.kube/config STEP: Creating invalid machineset STEP: Waiting for ReconcileError MachineSet event I0904 16:11:28.990980 31343 infra.go:506] Fetching ReconcileError MachineSet invalid-machineset event I0904 16:11:34.021999 31343 infra.go:506] Fetching ReconcileError MachineSet invalid-machineset event I0904 16:11:34.022036 31343 infra.go:512] Found ReconcileError event for "invalid-machineset" machine set with the following message: "invalid-machineset" machineset validation failed: spec.template.metadata.labels: Invalid value: map[string]string{"big-kitty":"i-am-bit-kitty"}: `selector` does not match template `labels` STEP: Verify no machine from "invalid-machineset" machineset were created I0904 16:11:34.024915 31343 infra.go:528] Have 0 machines generated from "invalid-machineset" machineset STEP: Deleting invalid machineset • [SLOW TEST:5.094 seconds] [Feature:Machines] Managed cluster should 
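
[Editor's note on the ReconcileError fetched above: the rejection is the controller's own validation that a MachineSet's spec.selector must select the labels in spec.template.metadata. The mismatch the test provokes can be reproduced with a manifest along these lines (a sketch only; the apiVersion is assumed to be the machine.openshift.io/v1beta1 API of this era, the selector value is illustrative since the log only shows the template label, and the provider details are trimmed):

  cat <<'EOF' | kubectl apply -f -
  apiVersion: machine.openshift.io/v1beta1
  kind: MachineSet
  metadata:
    name: invalid-machineset
    namespace: default
  spec:
    replicas: 1
    selector:
      matchLabels:
        little-kitty: i-am-little-kitty    # selector selects one label set... (illustrative)
    template:
      metadata:
        labels:
          big-kitty: i-am-bit-kitty        # ...the template carries another (from the log)
      spec: {}                             # providerSpec etc. trimmed for the sketch
  EOF
  # Expected outcome, per the log: a ReconcileError event ("`selector` does
  # not match template `labels`") and zero machines generated from it.
]
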
/data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126 reject invalid machinesets /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:487 ------------------------------ Ran 7 of 16 Specs in 199.110 seconds SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped --- PASS: TestE2E (199.11s) PASS ok github.com/openshift/cluster-api-actuator-pkg/pkg/e2e 199.156s + set +o xtrace ########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 04m 19s] ########## [PostBuildScript] - Executing post build scripts. [workspace] $ /bin/bash /tmp/jenkins5867762640308533448.sh ########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/gathered + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/gathered + mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/gathered + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/gathered /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/gathered 0 directories, 0 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins9210298648789660748.sh ########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/generated + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/generated + mkdir 
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/generated + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1' WARNING: You're not using the default seccomp profile + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1' + true + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1' + true + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1' + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/generated /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/generated ├── avc_denials.log ├── containers.log ├── dmesg.log ├── docker.config ├── docker.info ├── filesystem.info ├── installed_packages.log └── pid1.journal 0 directories, 8 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins8848910018268914327.sh ########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + 
ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/journals + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/journals + mkdir /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/journals + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/journals /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/artifacts/journals ├── dnsmasq.service ├── docker.service └── systemd-journald.service 0 directories, 3 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins2441593752441850400.sh ########## STARTING STAGE: ASSEMBLE GCS OUTPUT ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + trap 'exit 0' EXIT + mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered ++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/api/json'\''))['\''result'\'']' + result=SUCCESS + cat ++ date +%s + cat /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/builds/548/log + cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/ + cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/ + cp -r 'artifacts/gathered/*' gcs/artifacts/ cp: cannot stat 
[workspace] $ /bin/bash /tmp/jenkins2441593752441850400.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/builds/548/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r 'artifacts/gathered/*' gcs/artifacts/
cp: cannot stat ‘artifacts/gathered/*’: No such file or directory
++ export status=FAILURE
++ status=FAILURE
+ exit 0
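[editor's sketch] The "cp: cannot stat" failure above is a quoting bug: the single quotes around 'artifacts/gathered/*' keep the shell from expanding the glob, so cp looks for a file literally named artifacts/gathered/*, and the stage then exports status=FAILURE. The usual fix is to leave the pattern unquoted and guard the empty-directory case instead; a minimal sketch, assuming the same layout as the stage above:

    #!/bin/bash
    # With nullglob, an unmatched glob expands to nothing instead of to
    # itself, so we can test whether there is anything to copy.
    shopt -s nullglob
    files=( artifacts/gathered/* )
    if (( ${#files[@]} > 0 )); then
      cp -r "${files[@]}" gcs/artifacts/
    fi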
[workspace] $ /bin/bash /tmp/jenkins7806775163298608468.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.h6ZAPMH85u
+ cat
+ chmod +x /tmp/tmp.h6ZAPMH85u
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.h6ZAPMH85u openshiftdevel:/tmp/tmp.h6ZAPMH85u
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.h6ZAPMH85u"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"1169277617493250048","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}} ]]
++ jq --compact-output '.buildid |= "548"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"548","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}}'
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-actuator-pkg-master-e2e","buildid":"548","prowjobid":"47aee29c-cf2c-11e9-bd8d-0a58ac102ebf","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","repo_link":"https://github.com/openshift/cluster-api-actuator-pkg","base_ref":"master","base_sha":"b3dad42ded9cf0288809ca2cef3311c06339e749","base_link":"https://github.com/openshift/cluster-api-actuator-pkg/commit/b3dad42ded9cf0288809ca2cef3311c06339e749","pulls":[{"number":114,"author":"frobware","sha":"801a89559c2b745e456c44495e502136bfd3391b","link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114","commit_link":"https://github.com/openshift/cluster-api-actuator-pkg/pull/114/commits/801a89559c2b745e456c44495e502136bfd3391b","author_link":"https://github.com/frobware"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin '/data/gcs/*'
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
a073c86ecf9e: Already exists
cc3fc741b1a9: Already exists
822bed51ba40: Pulling fs layer
85cea451eec0: Pulling fs layer
85cea451eec0: Verifying Checksum
85cea451eec0: Download complete
822bed51ba40: Verifying Checksum
822bed51ba40: Download complete
822bed51ba40: Pull complete
85cea451eec0: Pull complete
Digest: sha256:03aad50d7ec631ee07c12ac2ba679bd48c7781f7d5754f9e0dcc4e7260e35208
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","file":"prow/gcsupload/run.go:107","func":"k8s.io/test-infra/prow/gcsupload.Options.assembleTargets","level":"warning","msg":"Encountered error in resolving items to upload for /data/gcs/*: stat /data/gcs/*: no such file or directory","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/548.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-actuator-pkg/114/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T16:11:56Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T16:11:56Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
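[editor's sketch] The push stage above rewrites the Prow JOB_SPEC so that buildid carries the Jenkins build number before handing the spec to the gcsupload image; the gcsupload warning shows the upload glob resolving to nothing, apparently because nothing was staged under /data/gcs on that host, so only the build log and latest-build markers were uploaded. A sketch of the rewrite-and-run step, assuming JOB_SPEC and BUILD_NUMBER are provided by the CI environment:

    #!/bin/bash
    # Replace the Prow buildid with the Jenkins build number so uploads
    # land under the expected GCS prefix, then run the uploader image.
    set -o errexit -o nounset -o pipefail

    JOB_SPEC="$(jq --compact-output --arg b "${BUILD_NUMBER}" \
      '.buildid |= $b' <<< "${JOB_SPEC}")"

    docker run -e "JOB_SPEC=${JOB_SPEC}" -v /data:/data:z \
      registry.svc.ci.openshift.org/ci/gcsupload:latest \
      --dry-run=false --gcs-path=gs://origin-ci-test \
      --gcs-credentials-file=/data/credentials.json \
      --path-strategy=single --default-org=openshift --default-repo=origin \
      '/data/gcs/*'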
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T16:11:56Z"} {"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T16:11:56Z"} + exit 0 + set +o xtrace ########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ########## [workspace] $ /bin/bash /tmp/jenkins7542035323342997284.sh ########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config + oct deprovision PLAYBOOK: main.yml ************************************************************* 4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml PLAY [ensure we have the parameters necessary to deprovision virtual hosts] **** TASK [ensure all required variables are set] *********************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9 skipping: [localhost] => (item=origin_ci_inventory_dir) => { "changed": false, "generated_timestamp": "2019-09-04 12:11:57.949766", "item": "origin_ci_inventory_dir", "skip_reason": "Conditional check failed", "skipped": true } skipping: [localhost] => (item=origin_ci_aws_region) => { "changed": false, "generated_timestamp": "2019-09-04 12:11:57.952962", "item": "origin_ci_aws_region", "skip_reason": "Conditional check failed", "skipped": true } PLAY [deprovision virtual hosts in EC2] **************************************** TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [deprovision a virtual EC2 host] ****************************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28 included: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost TASK [update the SSH configuration to remove AWS EC2 specifics] **************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2 ok: [localhost] => { "changed": 
ok: [localhost] => {
    "changed": false,
    "generated_timestamp": "2019-09-04 12:11:58.793986",
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true,
    "generated_timestamp": "2019-09-04 12:11:59.480704",
    "msg": "Tags {'Name': 'oct-terminate'} created for resource i-08faf914cd4493369."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true,
    "generated_timestamp": "2019-09-04 12:12:00.385070",
    "instance_ids": [
        "i-08faf914cd4493369"
    ],
    "instances": [
        {
            "ami_launch_index": "0",
            "architecture": "x86_64",
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true,
                    "status": "attached",
                    "volume_id": "vol-00985a53f235b4484"
                },
                "/dev/sdb": {
                    "delete_on_termination": true,
                    "status": "attached",
                    "volume_id": "vol-02c3588bcfd4d1e4f"
                }
            },
            "dns_name": "ec2-3-95-66-137.compute-1.amazonaws.com",
            "ebs_optimized": false,
            "groups": {
                "sg-7e73221a": "default"
            },
            "hypervisor": "xen",
            "id": "i-08faf914cd4493369",
            "image_id": "ami-0b77b87a37c3e662c",
            "instance_type": "m4.xlarge",
            "kernel": null,
            "key_name": "libra",
            "launch_time": "2019-09-04T15:55:05.000Z",
            "placement": "us-east-1c",
            "private_dns_name": "ip-172-18-25-165.ec2.internal",
            "private_ip": "172.18.25.165",
            "public_dns_name": "ec2-3-95-66-137.compute-1.amazonaws.com",
            "public_ip": "3.95.66.137",
            "ramdisk": null,
            "region": "us-east-1",
            "root_device_name": "/dev/sda1",
            "root_device_type": "ebs",
            "state": "running",
            "state_code": 16,
            "tags": {
                "Name": "oct-terminate",
                "openshift_etcd": "",
                "openshift_master": "",
                "openshift_node": ""
            },
            "tenancy": "default",
            "virtualization_type": "hvm"
        }
    ],
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
    "changed": true,
    "generated_timestamp": "2019-09-04 12:12:00.631641",
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.25.165.yml",
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] ********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true,
    "generated_timestamp": "2019-09-04 12:12:01.147466",
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-actuator-pkg-master-e2e/workspace/.config/origin-ci-tool/inventory",
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 05s] ##########
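[editor's sketch] The teardown above is deliberately two-step: the instance is first renamed to oct-terminate so that a separate reaper job can still find and clean up instances whose termination fails, and only then terminated, after which the serialized inventory is removed. The same tag-then-terminate pattern with the aws CLI, as a sketch; the instance ID is taken from the trace above, and region/credentials are assumed to be configured in the environment:

    #!/bin/bash
    # Tag first: if terminate-instances fails, a reaper can still locate
    # instances marked for deletion by their Name tag.
    set -o errexit -o nounset -o pipefail

    INSTANCE_ID="i-08faf914cd4493369"   # from the playbook output above

    aws ec2 create-tags --resources "${INSTANCE_ID}" \
      --tags Key=Name,Value=oct-terminate
    aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}"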
Archiving artifacts
Recording test results
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[WS-CLEANUP] done
Finished: SUCCESS