Console Output

Started by user OpenShift CI Robot
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[workspace@2] $ /bin/bash /tmp/jenkins6854203034917867747.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ touch /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ cp /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
+ cat
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 01s] ##########
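For reference, the stage above reduces to sourcing the generated activate script and setting three oct client options; a minimal sketch using the exact paths and values printed in the trace:

    # Sketch only: same sequence as the xtrace above, with this job's paths.
    source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
    oct configure ansible-client verbosity 2
    oct configure aws-client keypair_name libra
    oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem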
[workspace@2] $ /bin/bash /tmp/jenkins7679877793779744962.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ oct provision remote all-in-one --os rhel --stage base --provider aws --discrete-ssh-config --name pull-ci-openshift-cluster-autoscaler-operator-master-e2e_276

PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml

PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.674534", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_keypair_name)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.677033", 
    "item": "origin_ci_aws_keypair_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_private_key_path)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.680117", 
    "item": "origin_ci_aws_private_key_path", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.684561", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_ami_tags)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.687607", 
    "item": "origin_ci_aws_ami_tags", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_instance_name)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.690642", 
    "item": "origin_ci_aws_instance_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_instance_type)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.693675", 
    "item": "origin_ci_aws_master_instance_type", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.696708", 
    "item": "origin_ci_aws_identifying_tag_key", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_hostname)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.699778", 
    "item": "origin_ci_aws_hostname", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_ssh_config_strategy)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.704019", 
    "item": "origin_ci_ssh_config_strategy", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_schedulable)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.707079", 
    "item": "openshift_schedulable", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_node_labels)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.710750", 
    "item": "openshift_node_labels", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:27
skipping: [localhost] => (item=origin_ci_aws_master_subnet)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.743928", 
    "item": "origin_ci_aws_master_subnet", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.748601", 
    "item": "origin_ci_aws_etcd_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_node_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.753862", 
    "item": "origin_ci_aws_node_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.758083", 
    "item": "origin_ci_aws_master_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.762326", 
    "item": "origin_ci_aws_master_external_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.767699", 
    "item": "origin_ci_aws_master_internal_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.771909", 
    "item": "origin_ci_aws_router_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:51.777858", 
    "item": "origin_ci_aws_router_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [provision an AWS EC2 instance] *******************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:52.590079", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => {
    "changed": true, 
    "checksum": "18aaee00994df38cc3a63b635893175235331a9c", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/nested_group_mappings", 
    "generated_timestamp": "2019-09-04 07:07:53.057826", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "b30c3226ea63efa3ff9c5e346c14a16e", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 93, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595272.83-275960444856580/source", 
    "state": "file", 
    "uid": 997
}

TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:07:53.228448", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/group_vars/OSEv3", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:07:53.398073", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/host_vars", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "checksum": "4c06ba508f055c20f13426e8587342e8765a7b66", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml", 
    "generated_timestamp": "2019-09-04 07:07:53.686394", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "8aec71c75f7d512b278ae7c6f2959b12", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 331, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595273.55-78236476875838/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => {
    "changed": true, 
    "cmd": [
        "curl", 
        "-s", 
        "http://instance-data.ec2.internal"
    ], 
    "delta": "0:00:00.014670", 
    "end": "2019-09-04 07:07:53.914554", 
    "failed": false, 
    "failed_when_result": false, 
    "generated_timestamp": "2019-09-04 07:07:53.930122", 
    "rc": 0, 
    "start": "2019-09-04 07:07:53.899884", 
    "stderr": [], 
    "stdout": [
        "1.0", 
        "2007-01-19", 
        "2007-03-01", 
        "2007-08-29", 
        "2007-10-10", 
        "2007-12-15", 
        "2008-02-01", 
        "2008-09-01", 
        "2009-04-04", 
        "2011-01-01", 
        "2011-05-01", 
        "2012-01-12", 
        "2014-02-25", 
        "2014-11-05", 
        "2015-10-20", 
        "2016-04-19", 
        "2016-06-30", 
        "2016-09-02", 
        "2018-03-28", 
        "2018-08-17", 
        "2018-09-24", 
        "latest"
    ], 
    "warnings": [
        "Consider using get_url or uri module rather than running curl"
    ]
}
 [WARNING]: Consider using get_url or uri module rather than running curl

TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_destination_variable": "private_dns_name", 
        "origin_ci_aws_host_address_variable": "private_ip", 
        "origin_ci_aws_vpc_destination_variable": "private_ip_address"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:53.968074"
}

TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ec2_cache"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:54.001980"
}

TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:07:54.163310", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ec2_cache", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => {
    "changed": true, 
    "checksum": "625b8af723189db3b96ba0026d0f997a0025bc47", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/ec2.py", 
    "generated_timestamp": "2019-09-04 07:07:54.454963", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "cac06c14065dac74904232b89d4ba24c", 
    "mode": "0755", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 63725, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595274.32-189609424252617/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => {
    "changed": true, 
    "checksum": "5cf1fa4e63c20f913a2ad8be3ddc2ba586190296", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/ec2.ini", 
    "generated_timestamp": "2019-09-04 07:07:54.750709", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "abbb334a5551ac6c9172f89230957b70", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 437, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595274.49-209175223961414/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => {
    "changed": true, 
    "checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/tag_to_group_mappings", 
    "generated_timestamp": "2019-09-04 07:07:55.047696", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "bc3a567a1b6f342e1005182efc1b66be", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 287, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595274.91-211332514565593/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : list available AMIs] ********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:40
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:58.181444", 
    "results": [
        {
            "ami_id": "ami-04f9b88b6b0571f20", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 75, 
                    "snapshot_id": "snap-0655d2d962c590c8c", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 50, 
                    "snapshot_id": "snap-0d86ae865b17f4def", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2018-06-26T12:22:31.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the base stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_base_758", 
            "name": "ami_build_origin_int_rhel_base_758", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_base_758", 
                "image_stage": "base", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }, 
        {
            "ami_id": "ami-0b77b87a37c3e662c", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 75, 
                    "snapshot_id": "snap-02ec23d4818f2747e", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 50, 
                    "snapshot_id": "snap-0d8726e441d4ca329", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2018-06-26T22:18:53.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the base stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_base_760", 
            "name": "ami_build_origin_int_rhel_base_760", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_base_760", 
                "image_stage": "base", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }
    ]
}

TASK [aws-up : choose appropriate AMIs for use] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:49
ok: [localhost] => (item={u'ami_id': u'ami-04f9b88b6b0571f20', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_758', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d86ae865b17f4def', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0655d2d962c590c8c', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_758', u'is_public': False, u'creationDate': u'2018-06-26T12:22:31.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_758'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id_candidate": "ami-04f9b88b6b0571f20"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:58.226208", 
    "item": {
        "ami_id": "ami-04f9b88b6b0571f20", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 75, 
                "snapshot_id": "snap-0655d2d962c590c8c", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 50, 
                "snapshot_id": "snap-0d86ae865b17f4def", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2018-06-26T12:22:31.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the base stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_base_758", 
        "name": "ami_build_origin_int_rhel_base_758", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_base_758", 
            "image_stage": "base", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}
ok: [localhost] => (item={u'ami_id': u'ami-0b77b87a37c3e662c', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_760', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d8726e441d4ca329', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-02ec23d4818f2747e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_760', u'is_public': False, u'creationDate': u'2018-06-26T22:18:53.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_760'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id_candidate": "ami-0b77b87a37c3e662c"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:58.233707", 
    "item": {
        "ami_id": "ami-0b77b87a37c3e662c", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 75, 
                "snapshot_id": "snap-02ec23d4818f2747e", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 50, 
                "snapshot_id": "snap-0d8726e441d4ca329", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2018-06-26T22:18:53.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the base stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_base_760", 
        "name": "ami_build_origin_int_rhel_base_760", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_base_760", 
            "image_stage": "base", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:55
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_ami_id": "ami-0b77b87a37c3e662c"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:58.268633"
}

TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:60
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:59.210913", 
    "subnets": [
        {
            "availability_zone": "us-east-1c", 
            "available_ip_address_count": 4075, 
            "cidr_block": "172.18.16.0/20", 
            "default_for_az": "false", 
            "id": "subnet-8bdb5ac2", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-2", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }, 
        {
            "availability_zone": "us-east-1d", 
            "available_ip_address_count": 3971, 
            "cidr_block": "172.18.0.0/20", 
            "default_for_az": "false", 
            "id": "subnet-cf57c596", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-1", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:67
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_subnet_ids": [
            "subnet-8bdb5ac2", 
            "subnet-cf57c596"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:07:59.250595"
}

TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:72
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:00.218656", 
    "security_groups": [
        {
            "description": "default VPC security group", 
            "group_id": "sg-7e73221a", 
            "group_name": "default", 
            "ip_permissions": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": [
                        {
                            "group_id": "sg-7e73221a", 
                            "user_id": "531415883065"
                        }
                    ]
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "119.254.120.64/26"
                        }, 
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "209.132.186.34/32"
                        }, 
                        {
                            "cidr_ip": "213.175.37.10/32"
                        }, 
                        {
                            "cidr_ip": "62.40.79.66/32"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }, 
                        {
                            "cidr_ip": "38.140.108.0/24"
                        }, 
                        {
                            "cidr_ip": "213.175.37.9/32"
                        }, 
                        {
                            "cidr_ip": "38.99.12.232/29"
                        }, 
                        {
                            "cidr_ip": "4.14.33.72/30"
                        }, 
                        {
                            "cidr_ip": "4.14.35.88/29"
                        }, 
                        {
                            "cidr_ip": "50.227.40.96/29"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 8444, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 22, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 22, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 80, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "54.241.19.245/32"
                        }, 
                        {
                            "cidr_ip": "97.65.119.184/29"
                        }, 
                        {
                            "cidr_ip": "107.20.219.35/32"
                        }, 
                        {
                            "cidr_ip": "108.166.48.153/32"
                        }, 
                        {
                            "cidr_ip": "212.199.177.64/27"
                        }, 
                        {
                            "cidr_ip": "212.72.208.162/32"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 443, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "udp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 53, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 0, 
                    "ip_protocol": "udp", 
                    "ip_ranges": [], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 65535, 
                    "user_id_group_pairs": [
                        {
                            "group_id": "sg-0d1a5377", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-5875023f", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-7e73221a", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-e1760186", 
                            "user_id": "531415883065"
                        }
                    ]
                }, 
                {
                    "from_port": 3389, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 3389, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": -1, 
                    "ip_protocol": "icmp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": -1, 
                    "user_id_group_pairs": []
                }
            ], 
            "ip_permissions_egress": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": []
                }
            ], 
            "owner_id": "531415883065", 
            "tags": {
                "Name": "devenv-vpc", 
                "openshift_infra": "true", 
                "origin_ci_aws_cluster_component": "master_security_group"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:79
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_security_group_ids": [
            "sg-7e73221a"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:00.260324"
}

TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:84
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:08:22.855439", 
    "instance_ids": [
        "i-00026d36a3df1ef96"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-028c56dcaee239954"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0c9838ad75698812b"
                }
            }, 
            "dns_name": "ec2-54-227-18-68.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-00026d36a3df1ef96", 
            "image_id": "ami-0b77b87a37c3e662c", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2019-09-04T11:08:01.000Z", 
            "placement": "us-east-1c", 
            "private_dns_name": "ip-172-18-19-207.ec2.internal", 
            "private_ip": "172.18.19.207", 
            "public_dns_name": "ec2-54-227-18-68.compute-1.amazonaws.com", 
            "public_ip": "54.227.18.68", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "pull-ci-openshift-cluster-autoscaler-operator-master-e2e_276", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:110
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_host": "172.18.19.207"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:22.894601"
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:114
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:22.927796", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:119
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_ssh_user": "origin"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:22.965198"
}

TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:124
changed: [localhost] => {
    "changed": true, 
    "checksum": "436ffcd435e9abddc4e5ac8a73cacb396e22d18a", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/host_vars/172.18.19.207.yml", 
    "generated_timestamp": "2019-09-04 07:08:23.290900", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "90fce33a411b23ef083a83c863eeb888", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 776, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567595303.15-171198560372179/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:141
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_ssh_config_files": [
            "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:23.331015"
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:146
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:08:23.365518", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:151
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config", 
    "generated_timestamp": "2019-09-04 07:08:23.534937", 
    "gid": 995, 
    "group": "jenkins", 
    "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:157
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:08:23.826482", 
    "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config", 
    "msg": "Block inserted"
}

TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:175
ok: [localhost] => {
    "changed": false, 
    "elapsed": 65, 
    "generated_timestamp": "2019-09-04 07:09:29.226666", 
    "path": null, 
    "port": 22, 
    "search_regex": null, 
    "state": "started"
}

PLAY RECAP *********************************************************************
localhost                  : ok=28   changed=13   unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 01m 39s] ##########
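The whole provisioning stage is driven by the single oct call shown at the top of the stage; a sketch of that invocation, with the flag roles inferred from the playbook output above (the AMI is matched on its operating_system/image_stage tags and the EC2 Name tag comes from --name):

    # Sketch only: the command behind the aws-up.yml run recapped above.
    oct provision remote all-in-one \
        --os rhel \
        --stage base \
        --provider aws \
        --discrete-ssh-config \
        --name pull-ci-openshift-cluster-autoscaler-operator-master-e2e_276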
[workspace@2] $ /bin/bash /tmp/jenkins7491198972347366735.sh
########## STARTING STAGE: FORWARD GCS CREDENTIALS TO REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ (( i = 0 ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
lost connection
+ (( i++  ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
lost connection
+ (( i++  ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ break
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD GCS CREDENTIALS TO REMOTE HOST [00h 00m 02s] ##########
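The credential copy above retries scp in a bounded loop because the freshly provisioned host dropped the first two connections ("lost connection") before the third attempt succeeded. A sketch of that retry pattern, assuming the OCT_CONFIG_HOME exported by the activate script; the sleep between attempts is an illustrative addition, not part of the original script:

    # Up to 10 scp attempts against the new host, stopping on the first success.
    # OCT_CONFIG_HOME is .../workspace@2/.config, as exported by the activate script.
    for (( i = 0; i < 10; i++ )); do
        if scp -F "${OCT_CONFIG_HOME}/origin-ci-tool/inventory/.ssh_config" \
               /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json \
               openshiftdevel:/data/credentials.json; then
            break
        fi
        sleep 5   # illustrative back-off; the logged script retries immediately
    done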
[workspace@2] $ /bin/bash /tmp/jenkins2220437062741150783.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"1169205390341050368","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''buildId='\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_ID=1169205390341050368'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_OWNER=openshift'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_NAME=cluster-autoscaler-operator'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_REF=master'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_SHA=5408e9b6aa7c16908e7cdd5dc75d647c449601f3'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_REFS=master:5408e9b6aa7c16908e7cdd5dc75d647c449601f3,117:b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_NUMBER=117'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_PULL_SHA=b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"1169205390341050368","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=276'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''CLONEREFS_ARGS='\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 04s] ##########
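Parameters are forwarded by making /etc/environment on the remote host writable once (chmod o+rw) and then appending one KEY=value line per ssh call. A sketch of the pattern for a single variable; the SSH_CONFIG shorthand is introduced here for brevity and is not in the original script:

    # Shorthand for the inventory ssh_config written by the provision stage.
    SSH_CONFIG="${OCT_CONFIG_HOME}/origin-ci-tool/inventory/.ssh_config"
    # Relax permissions once, then append variables remotely.
    ssh -F "${SSH_CONFIG}" openshiftdevel sudo chmod o+rw /etc/environment
    ssh -F "${SSH_CONFIG}" openshiftdevel "echo 'BUILD_NUMBER=276' >> /etc/environment"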
[workspace@2] $ /bin/bash /tmp/jenkins3730084360108153379.sh
########## STARTING STAGE: SYNC REPOSITORIES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.cS8l2eha7B
+ cat
+ chmod +x /tmp/tmp.cS8l2eha7B
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.cS8l2eha7B openshiftdevel:/tmp/tmp.cS8l2eha7B
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.cS8l2eha7B"'
+ cd /home/origin
++ jq --compact-output '.buildid |= "276"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"276","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}'
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs
1160f4abea84: Pulling fs layer
be60dbe7622d: Pulling fs layer
d26b76701841: Pulling fs layer
1b90cab916ea: Pulling fs layer
3a00cbb24bdb: Pulling fs layer
1b90cab916ea: Waiting
3a00cbb24bdb: Waiting
1160f4abea84: Verifying Checksum
1160f4abea84: Download complete
be60dbe7622d: Verifying Checksum
be60dbe7622d: Download complete
d26b76701841: Verifying Checksum
d26b76701841: Download complete
3a00cbb24bdb: Verifying Checksum
3a00cbb24bdb: Download complete
1b90cab916ea: Verifying Checksum
1b90cab916ea: Download complete
1160f4abea84: Pull complete
be60dbe7622d: Pull complete
d26b76701841: Pull complete
1b90cab916ea: Pull complete
3a00cbb24bdb: Pull complete
Digest: sha256:d68e1c6c2de5c1167a79b24d5ba4f909349ca7a44fb634e214bdadc2c8b010cd
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest
+ break
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
8f72556ef119: Pulling fs layer
8e5b170ec95b: Pulling fs layer
8e5b170ec95b: Waiting
a073c86ecf9e: Verifying Checksum
a073c86ecf9e: Download complete
cc3fc741b1a9: Verifying Checksum
cc3fc741b1a9: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
a073c86ecf9e: Pull complete
8f72556ef119: Verifying Checksum
8f72556ef119: Download complete
cc3fc741b1a9: Pull complete
8f72556ef119: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:e651a6455ada7c070c439eddcd753e2e2ac1fb934c4f2a526c37a4674c8eaee4
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
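The xtrace lines above ("(( i = 0 ))", "(( i < 5 ))", docker pull, break) come from a bounded retry loop: each image is pulled up to five times and the loop exits on the first success. The loop body is not shown verbatim in the trace, so the following is only a sketch of the pattern (the back-off is an assumption):

    # Sketch: retry `docker pull` up to 5 times per image, stopping on the first success.
    for image in 'registry.svc.ci.openshift.org/ci/clonerefs:latest' \
                 'registry.svc.ci.openshift.org/ci/initupload:latest'; do
        for (( i = 0; i < 5; i++ )); do
            docker pull "${image}" && break
            sleep 5   # assumption: brief pause between attempts; not visible in the trace
        done
    done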
+ clonerefs_args='--repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,kubernetes-autoscaler=master '
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,cluster-autoscaler-operator=master:5408e9b6aa7c16908e7cdd5dc75d647c449601f3,117:b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3 --repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,kubernetes-autoscaler=master
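clonerefs is driven by --repo flags of the form org,repo=base_ref:base_sha,pull_number:pull_sha: the repository under test is pinned to the master base SHA plus the pull request head, while the extra repositories (cluster-api-provider-kubemark, machine-api-operator, kubernetes-autoscaler) are cloned at master only. A sketch of assembling such a flag from this job's values:

    # Sketch: build the clonerefs --repo flag for the PR under test.
    ORG=openshift
    REPO=cluster-autoscaler-operator
    BASE_SHA=5408e9b6aa7c16908e7cdd5dc75d647c449601f3
    PR_NUMBER=117
    PR_SHA=b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3
    echo "--repo=${ORG},${REPO}=master:${BASE_SHA},${PR_NUMBER}:${PR_SHA}"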
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3"}]},"time":"2019-09-04T11:10:34Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"kubernetes-autoscaler","base_ref":"master"},"time":"2019-09-04T11:10:34Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","base_ref":"master"},"time":"2019-09-04T11:10:34Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"machine-api-operator","base_ref":"master"},"time":"2019-09-04T11:10:34Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-autoscaler-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"mkdir -p /data/src/github.com/openshift/kubernetes-autoscaler","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-autoscaler-operator/.git/\n","time":"2019-09-04T11:10:34Z"}
{"command":"mkdir -p /data/src/github.com/openshift/machine-api-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/kubernetes-autoscaler/.git/\n","time":"2019-09-04T11:10:34Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-kubemark/.git/\n","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/machine-api-operator/.git/\n","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:34Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch            HEAD       -\u003e FETCH_HEAD\n * [new tag]         v0.1.0     -\u003e v0.1.0\n * [new tag]         v0.2.0     -\u003e v0.2.0\n","time":"2019-09-04T11:10:37Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch            master     -\u003e FETCH_HEAD\n","time":"2019-09-04T11:10:37Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch              HEAD       -\u003e FETCH_HEAD\n * [new tag]           v0.0.0     -\u003e v0.0.0\n","time":"2019-09-04T11:10:37Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch              master     -\u003e FETCH_HEAD\n","time":"2019-09-04T11:10:38Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 474e14e... Merge pull request #391 from mgugino-upstream-stage/related-resources\n","time":"2019-09-04T11:10:38Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:38Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T11:10:38Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:38Z"}
{"command":"git checkout 5408e9b6aa7c16908e7cdd5dc75d647c449601f3","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out '5408e9b6aa7c16908e7cdd5dc75d647c449601f3'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 5408e9b6... Merge pull request #116 from ingvagabund/goimports\n","time":"2019-09-04T11:10:38Z"}
{"command":"git branch --force master 5408e9b6aa7c16908e7cdd5dc75d647c449601f3","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:38Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T11:10:38Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git pull/117/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch              refs/pull/117/head -\u003e FETCH_HEAD\n","time":"2019-09-04T11:10:39Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch              HEAD       -\u003e FETCH_HEAD\n * [new tag]           v1.0       -\u003e v1.0\n","time":"2019-09-04T11:10:39Z"}
{"command":"git merge --no-ff b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Merge made by the 'recursive' strategy.\n pkg/operator/operator.go      | 17 +++++++++++++++--\n pkg/operator/operator_test.go | 35 +++++++++++++++++++++++++++++++++++\n 2 files changed, 50 insertions(+), 2 deletions(-)\n create mode 100644 pkg/operator/operator_test.go\n","time":"2019-09-04T11:10:39Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:39Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch              master     -\u003e FETCH_HEAD\n","time":"2019-09-04T11:10:39Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 45659b31... Merge pull request #27 from frobware/bump-openshift-cluster-api-deps\n","time":"2019-09-04T11:10:41Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:41Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T11:10:41Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:41Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch                HEAD                        -\u003e FETCH_HEAD\n * [new tag]             addon-resizer-1.8.0         -\u003e addon-resizer-1.8.0\n * [new tag]             addon-resizer-1.8.1         -\u003e addon-resizer-1.8.1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.37.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.38.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.39.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.40.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.41.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.42.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.43.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.44.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.46.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.47.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.50.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.51.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.52.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.53.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.53.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.54.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.54.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.56.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.57.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.58.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.60.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.61.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.61.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.63.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.64.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.64.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.10.0-0.65.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.65.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.66.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.67.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.67.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.68.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.68.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.69.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-1666 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-1666\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.1-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.1-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.10-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.10-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.11-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.12-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.13-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.14-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.15-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.16-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.17-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.18-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.2-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.2-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.21-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.22-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.23-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.3-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.5-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.6-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.7-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.8-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.9-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.10.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.11.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.11.0\n * [new 
tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.13.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.14.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.15.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.16.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.16.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.17.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.18.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.19.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.20.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.21.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.22.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.23.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.24.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.25.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.26.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.27.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.28.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.30.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.32.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.5.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.7.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.8.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.9.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.100-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.100-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.104-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.104-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.105-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.105-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.106-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.106-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.107-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.107-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.108-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.108-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.109-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.109-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.11-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.110-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.110-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.111-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.111-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.112-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.112-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.113-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.113-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.114-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.114-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.115-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.115-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.116-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.116-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.117-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.117-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.119-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.119-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.12-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.121-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.121-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.122-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.122-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.123-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.123-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.124-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.124-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.125-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.125-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.126-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.126-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.127-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.127-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.128-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.128-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.129-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.129-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.13-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.130-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.130-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.131-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.131-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.132-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.132-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.133-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.133-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.134-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.134-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.135-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.135-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.136-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.136-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.137-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.137-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.138-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.138-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.139-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.139-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.14-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.140-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.140-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.141-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.141-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.142-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.142-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.15-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.16-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.17-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.18-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.19-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.19-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.20-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.20-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.21-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.22-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.23-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.24-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.24-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.25-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.25-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.26-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.26-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.27-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.27-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.28-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.28-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.29-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.29-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.3-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.30-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.30-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.31-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.31-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.32-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.32-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.33-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.33-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.34-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.34-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.35-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.35-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.36-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.36-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.37-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.37-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.38-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.38-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.39-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.39-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.4-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.4-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.40-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.40-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.41-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.41-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.42-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.42-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.43-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.43-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.44-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.44-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.45-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.45-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.46-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.46-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.47-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.47-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.49-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.49-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.5-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.50-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.50-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.51-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.51-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.53-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.53-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.54-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.54-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.55-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.55-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.56-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.56-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.57-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.57-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.58-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.58-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.59-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.59-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.6-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.60-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.60-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.61-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.61-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.62-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.62-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.63-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.63-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.64-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.64-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.65-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.65-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.66-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.66-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.67-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.67-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.69-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.69-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.7-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.71-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.71-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.72-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.72-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.73-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.73-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.74-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.74-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.75-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.75-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.76-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.76-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.77-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.77-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.78-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.78-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.79-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.79-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.8-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.81-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.81-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.82-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.82-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.83-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.83-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.85-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.85-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.86-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.86-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.87-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.87-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.88-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.88-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.9-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.90-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.90-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.91-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.91-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.92-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.92-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.93-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.93-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.94-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.94-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.95-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.95-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.96-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.96-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.97-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.97-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.98-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.98-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.99-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.99-1\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.10.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.100.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.100.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.101.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.101.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.102.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.102.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.103.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.103.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.104.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.104.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.105.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.105.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.106.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.106.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.107.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.107.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.109.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.109.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.110.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.110.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.112.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.112.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.114.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.114.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.115.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.115.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.116.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.116.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.117.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.117.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-4.0.0-0.118.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.118.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.119.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.119.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.12.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.12.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.122.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.122.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.123.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.123.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.124.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.124.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.125.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.125.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.128.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.128.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.13.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.130.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.130.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.131.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.131.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.132.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.132.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.136.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.136.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.137.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.137.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.138.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.138.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.139.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.139.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.14.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.140.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.140.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.141.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.141.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.142.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.142.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.143.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.143.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.144.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.144.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.145.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.145.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.146.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.146.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.147.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.147.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.148.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.148.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.149.0 -\u003e 
atomic-openshift-cluster-autoscaler-4.0.0-0.149.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.15.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.17.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.18.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.19.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.20.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.21.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.22.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.23.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.24.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.25.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.26.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.27.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.28.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.29.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.29.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.30.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.31.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.31.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.32.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.33.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.33.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.36.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.36.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.37.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.38.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.39.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.4.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.4.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.40.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.41.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.42.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.43.0 
-\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.43.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.44.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.45.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.45.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.46.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.47.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.48.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.48.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.49.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.49.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.5.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.50.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.51.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.52.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.55.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.55.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.56.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.57.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.58.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.59.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.59.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.6.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.6.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.60.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.62.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.62.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.63.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.66.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.69.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.7.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.70.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.70.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.72.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.72.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.74.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.74.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.75.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.75.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-4.0.0-0.76.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.76.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.77.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.77.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.79.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.79.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.8.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.80.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.80.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.81.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.81.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.82.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.82.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.83.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.83.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.84.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.84.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.85.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.85.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.87.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.87.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.88.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.88.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.89.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.89.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.9.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.91.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.91.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.92.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.92.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.93.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.93.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.94.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.94.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.95.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.95.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.96.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.96.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.97.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.97.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.98.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.98.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.99.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.99.0\n * [new tag]             cluster-autoscaler-0.5.2    -\u003e cluster-autoscaler-0.5.2\n * [new tag]             cluster-autoscaler-0.5.3    -\u003e cluster-autoscaler-0.5.3\n * [new tag]             cluster-autoscaler-0.5.4    -\u003e cluster-autoscaler-0.5.4\n * [new tag]             cluster-autoscaler-0.6.0    -\u003e cluster-autoscaler-0.6.0\n * [new tag]             cluster-autoscaler-0.6.1    -\u003e cluster-autoscaler-0.6.1\n * [new tag]             cluster-autoscaler-0.6.2    -\u003e cluster-autoscaler-0.6.2\n * [new tag]             
cluster-autoscaler-0.6.3    -\u003e cluster-autoscaler-0.6.3\n * [new tag]             cluster-autoscaler-0.6.4    -\u003e cluster-autoscaler-0.6.4\n * [new tag]             cluster-autoscaler-1.0.0    -\u003e cluster-autoscaler-1.0.0\n * [new tag]             cluster-autoscaler-1.0.1    -\u003e cluster-autoscaler-1.0.1\n * [new tag]             cluster-autoscaler-1.0.2    -\u003e cluster-autoscaler-1.0.2\n * [new tag]             cluster-autoscaler-1.0.3    -\u003e cluster-autoscaler-1.0.3\n * [new tag]             cluster-autoscaler-1.0.4    -\u003e cluster-autoscaler-1.0.4\n * [new tag]             cluster-autoscaler-1.0.5    -\u003e cluster-autoscaler-1.0.5\n * [new tag]             cluster-autoscaler-1.1.0    -\u003e cluster-autoscaler-1.1.0\n * [new tag]             cluster-autoscaler-1.1.1    -\u003e cluster-autoscaler-1.1.1\n * [new tag]             cluster-autoscaler-1.1.2    -\u003e cluster-autoscaler-1.1.2\n * [new tag]             cluster-autoscaler-1.2.0    -\u003e cluster-autoscaler-1.2.0\n * [new tag]             cluster-autoscaler-1.2.1    -\u003e cluster-autoscaler-1.2.1\n * [new tag]             cluster-autoscaler-1.2.2    -\u003e cluster-autoscaler-1.2.2\n * [new tag]             v3.10.0                     -\u003e v3.10.0\n * [new tag]             v3.10.0-alpha.0             -\u003e v3.10.0-alpha.0\n * [new tag]             v3.10.0-rc.0                -\u003e v3.10.0-rc.0\n * [new tag]             v3.11                       -\u003e v3.11\n * [new tag]             v3.11.0                     -\u003e v3.11.0\n * [new tag]             v3.11.0-alpha.0             -\u003e v3.11.0-alpha.0\n * [new tag]             vertical-pod-autoscaler-0.1 -\u003e vertical-pod-autoscaler-0.1\n","time":"2019-09-04T11:10:44Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch                master     -\u003e FETCH_HEAD\n","time":"2019-09-04T11:10:44Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 18a08df11... Merge pull request #114 from ingvagabund/goimports-makefile\n","time":"2019-09-04T11:10:46Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:46Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-04T11:10:47Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-04T11:10:47Z"}
{"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-09-04T11:10:47Z"}
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"276","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/started.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:10:50Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:10:51Z"}
{"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T11:10:51Z"}
+ sudo chmod -R a+rwX /data
+ sudo chown -R origin:origin-git /data
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 19s] ##########
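The initupload step above mirrors the clone records and build metadata into GCS under gs://origin-ci-test. A minimal sketch for browsing those artifacts afterwards, assuming gsutil is available and the bucket is readable; the paths are taken from the destinations logged above:

  # Sketch: list the artifacts uploaded for this run (PR 117, build 276).
  gsutil ls gs://origin-ci-test/pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/
  # The directory index also records the most recent build number for the job.
  gsutil cat gs://origin-ci-test/pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt
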
[workspace@2] $ /bin/bash /tmp/jenkins2086313445131572270.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=pull-ci-openshift-cluster-autoscaler-operator-master-e2e'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=276'\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ##########
[workspace@2] $ /bin/bash /tmp/jenkins5250809401696373379.sh
########## STARTING STAGE: INSTALL MINIKUBE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.qKZqT3vnz7
+ cat
+ chmod +x /tmp/tmp.qKZqT3vnz7
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.qKZqT3vnz7 openshiftdevel:/tmp/tmp.qKZqT3vnz7
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.qKZqT3vnz7"'
+ cd /home/origin
+ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0 40.3M    0   638    0     0    785      0 14:58:00 --:--:-- 14:58:00   784
100 40.3M  100 40.3M    0     0  28.8M      0  0:00:01  0:00:01 --:--:-- 28.8M
+ chmod +x minikube
+ sudo mv minikube /usr/bin/
+ curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
 12 51.7M   12 6784k    0     0  19.7M      0  0:00:02 --:--:--  0:00:02 19.6M
100 51.7M  100 51.7M    0     0  66.3M      0 --:--:-- --:--:-- --:--:-- 66.3M
+ chmod +x kubectl
+ sudo mv kubectl /usr/bin/
+ sudo yum install -y ebtables
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Resolving Dependencies
--> Running transaction check
---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package     Arch      Version           Repository                        Size
================================================================================
Installing:
 ebtables    x86_64    2.0.10-16.el7     oso-rhui-rhel-server-releases    123 k

Transaction Summary
================================================================================
Install  1 Package

Total download size: 123 k
Installed size: 343 k
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : ebtables-2.0.10-16.el7.x86_64                                1/1 
  Verifying  : ebtables-2.0.10-16.el7.x86_64                                1/1 

Installed:
  ebtables.x86_64 0:2.0.10-16.el7                                               

Complete!
+ VERSION=v1.13.0
+ wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
--2019-09-04 11:12:30--  https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
Resolving github.com (github.com)... 192.30.253.112
Connecting to github.com (github.com)|192.30.253.112|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190904%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190904T111230Z&X-Amz-Expires=300&X-Amz-Signature=5103e2bd5dfa6d1bb7e54e12337239b6091b4a3d9de84629d5c9e2dd02c209f1&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream [following]
--2019-09-04 11:12:30--  https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190904%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190904T111230Z&X-Amz-Expires=300&X-Amz-Signature=5103e2bd5dfa6d1bb7e54e12337239b6091b4a3d9de84629d5c9e2dd02c209f1&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.216.109.35
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.216.109.35|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10631149 (10M) [application/octet-stream]
Saving to: ‘crictl-v1.13.0-linux-amd64.tar.gz’

     0K .......... .......... .......... .......... ..........  0% 88.3M 0s
 10350K .......... .......... .......... .                    100%  172M=0.1s

2019-09-04 11:12:30 (88.6 MB/s) - ‘crictl-v1.13.0-linux-amd64.tar.gz’ saved [10631149/10631149]

+ sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin
crictl
+ rm -f crictl-v1.13.0-linux-amd64.tar.gz
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 01m 36s] ##########
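The stage above amounts to a short, reproducible install script. A minimal sketch assembled from the commands and pinned versions logged above (minikube v0.30.0, kubectl v1.10.0, crictl v1.13.0); it assumes a yum-based host with sudo, and is not part of the job itself:

  # Sketch: reproduce the INSTALL MINIKUBE stage outside CI.
  curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
  chmod +x minikube && sudo mv minikube /usr/bin/
  curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
  chmod +x kubectl && sudo mv kubectl /usr/bin/
  sudo yum install -y ebtables          # kubeadm preflight dependency for the none driver
  wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
  sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin
  rm -f crictl-v1.13.0-linux-amd64.tar.gz
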
[workspace@2] $ /bin/bash /tmp/jenkins2471185168023117403.sh
########## STARTING STAGE: DEPLOY KUBERNETES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.5TluZJgqaS
+ cat
+ chmod +x /tmp/tmp.5TluZJgqaS
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.5TluZJgqaS openshiftdevel:/tmp/tmp.5TluZJgqaS
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.5TluZJgqaS"'
+ cd /home/origin
+ sudo setenforce 0
+ sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5
There is a newer version of minikube available (v1.3.1).  Download it here:
https://github.com/kubernetes/minikube/releases/tag/v1.3.1

To disable this notification, run the following:
minikube config set WantUpdateNotification false
Starting local Kubernetes v1.12.0 cluster...
Starting VM...
Creating CA: /root/.minikube/certs/ca.pem
Creating client certificate: /root/.minikube/certs/cert.pem
Getting VM IP address...
Moving files into cluster...
Downloading kubeadm v1.12.0
Downloading kubelet v1.12.0
Finished Downloading kubeadm v1.12.0
Finished Downloading kubelet v1.12.0
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
	The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks

When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions.  An example of this is below:

	sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
	sudo chown -R $USER $HOME/.kube
	sudo chgrp -R $USER $HOME/.kube

	sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
	sudo chown -R $USER $HOME/.minikube
	sudo chgrp -R $USER $HOME/.minikube

This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true
Loading cached images from config file.
+ sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 01m 04s] ##########
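The minikube notice above points out that the none driver leaves ~/.kube and ~/.minikube owned by root, and that CHANGE_MINIKUBE_NONE_USER=true avoids the manual chown/chgrp steps. A minimal sketch of the same start command with that variable set, assuming the flags used by this stage:

  # Sketch: none-driver start that fixes up file ownership automatically (per the minikube notice above).
  sudo setenforce 0
  export CHANGE_MINIKUBE_NONE_USER=true
  sudo -E minikube start --vm-driver=none \
      --extra-config=kubelet.cgroup-driver=systemd \
      --kubernetes-version v1.12.0 --v 5   # -E keeps the env var visible to the root process
  kubectl get nodes                        # sanity check: the single node should report Ready
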
[workspace@2] $ /bin/bash /tmp/jenkins2127843738907129102.sh
########## STARTING STAGE: INSTALL KUSTOMIZE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.31uWJa8aG3
+ cat
+ chmod +x /tmp/tmp.31uWJa8aG3
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.31uWJa8aG3 openshiftdevel:/tmp/tmp.31uWJa8aG3
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.31uWJa8aG3"'
+ cd /home/origin
+ curl -Lo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.1.0/kustomize_2.1.0_linux_amd64
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   618    0   618    0     0   2738      0 --:--:-- --:--:-- --:--:--  2746

 43 22.9M   43  9.9M    0     0  21.0M      0  0:00:01 --:--:--  0:00:01 21.0M
100 22.9M  100 22.9M    0     0  38.9M      0 --:--:-- --:--:-- --:--:--  110M
+ chmod u+x kustomize
+ sudo mv kustomize /usr/bin/kustomize
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL KUSTOMIZE [00h 00m 01s] ##########
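kustomize is consumed later in the job purely as a renderer piped into kubectl. A minimal usage sketch matching the pattern of the deploy stages below; the directory names are illustrative:

  # Sketch: render a kustomization and apply it, mirroring the "kustomize build ... | kubectl apply -f -" calls below.
  kustomize build config | kubectl apply -f -   # render the ./config overlay
  kustomize build .      | kubectl apply -f -   # or the kustomization in the current directory
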
[workspace@2] $ /bin/bash /tmp/jenkins29058062070248032.sh
########## STARTING STAGE: INSTALL IMAGEBUILDER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.33kARuuvwo
+ cat
+ chmod +x /tmp/tmp.33kARuuvwo
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.33kARuuvwo openshiftdevel:/tmp/tmp.33kARuuvwo
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.33kARuuvwo"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL IMAGEBUILDER [00h 00m 29s] ##########
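imagebuilder is built from source into $GOPATH/bin (/data/bin here) and then used by the following stages to build images straight from Dockerfiles. A minimal usage sketch; the image tag is a hypothetical placeholder, while the -t and -f flags are the ones exercised below:

  # Sketch: imagebuilder invocations of the shape used by the build stages that follow.
  imagebuilder -t example.io/my-image:dev .                                          # build ./Dockerfile (hypothetical tag)
  imagebuilder -f images/cluster-autoscaler/Dockerfile -t example.io/my-image:dev .  # or point -f at a specific Dockerfile
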
[workspace@2] $ /bin/bash /tmp/jenkins5383951893472279571.sh
########## STARTING STAGE: BUILD KUBEMARK MACHINE CONTROLLERS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.0lk6h0f5Ym
+ cat
+ chmod +x /tmp/tmp.0lk6h0f5Ym
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.0lk6h0f5Ym openshiftdevel:/tmp/tmp.0lk6h0f5Ym
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.0lk6h0f5Ym"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo make images IMAGE=docker.io/gofed/kubemark-machine-controllers VERSION=v1.0 NO_DOCKER=1
imagebuilder -t "docker.io/gofed/kubemark-machine-controllers:v1.0" -t "docker.io/gofed/kubemark-machine-controllers:latest" ./
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.10 was not found, pulling ...
--> Pulled 0/2 layers, 13% complete
--> Pulled 1/2 layers, 68% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark
--> COPY . .
--> RUN go build -o ./machine-controller-manager ./cmd/manager
--> RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager
--> Image docker.io/gofed/base:baseci was not found, pulling ...
--> Pulled 1/2 layers, 78% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM docker.io/gofed/base:baseci as 1
--> RUN INSTALL_PKGS="       openssh       " &&     yum install -y $INSTALL_PKGS &&     rpm -V $INSTALL_PKGS &&     yum clean all &&     curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&     chmod +x ./kubectl &&     mv ./kubectl /bin/kubectl &&     curl -LO https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 &&     chmod +x ./jq-linux64 &&     mv ./jq-linux64 /bin/jq
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
 * base: repos-va.psychz.net
 * extras: repos-va.psychz.net
 * updates: repos-va.psychz.net
Resolving Dependencies
--> Running transaction check
---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed
--> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64
--> Running transaction check
---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed
--> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64
--> Running transaction check
---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package               Arch           Version                Repository    Size
================================================================================
Installing:
 openssh               x86_64         7.4p1-16.el7           base         510 k
Installing for dependencies:
 fipscheck             x86_64         1.4.1-6.el7            base          21 k
 fipscheck-lib         x86_64         1.4.1-6.el7            base          11 k

Transaction Summary
================================================================================
Install  1 Package (+2 Dependent packages)

Total download size: 542 k
Installed size: 2.0 M
Downloading packages:
--------------------------------------------------------------------------------
Total                                              4.6 MB/s | 542 kB  00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : fipscheck-1.4.1-6.el7.x86_64                                 1/3 
  Installing : fipscheck-lib-1.4.1-6.el7.x86_64                             2/3 
  Installing : openssh-7.4p1-16.el7.x86_64                                  3/3 
  Verifying  : fipscheck-lib-1.4.1-6.el7.x86_64                             1/3 
  Verifying  : fipscheck-1.4.1-6.el7.x86_64                                 2/3 
  Verifying  : openssh-7.4p1-16.el7.x86_64                                  3/3 

Installed:
  openssh.x86_64 0:7.4p1-16.el7                                                 

Dependency Installed:
  fipscheck.x86_64 0:1.4.1-6.el7       fipscheck-lib.x86_64 0:1.4.1-6.el7      

Complete!
Loaded plugins: fastestmirror, ovl
Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates
Cleaning up list of fastest mirrors
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
 69 40.9M   69 28.2M    0     0  58.7M      0 --:--:-- --:--:-- --:--:-- 58.7M
100 40.9M  100 40.9M    0     0  67.5M      0 --:--:-- --:--:-- --:--:-- 67.5M
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   599    0   599    0     0   2494      0 --:--:-- --:--:-- --:--:--  2506

100 2956k  100 2956k    0     0  9312k      0 --:--:-- --:--:-- --:--:-- 9312k
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager /
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager /
--> Committing changes to docker.io/gofed/kubemark-machine-controllers:v1.0 ...
--> Tagged as docker.io/gofed/kubemark-machine-controllers:latest
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD KUBEMARK MACHINE CONTROLLERS [00h 01m 32s] ##########
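The make target above commits two tags for the kubemark machine controllers image. A quick check of the result, sketched under the assumption that the local docker daemon holds the committed image:

  # Sketch: confirm the tags committed by the build above.
  docker images | grep kubemark-machine-controllers   # expect both the v1.0 and latest tags
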
[workspace@2] $ /bin/bash /tmp/jenkins896899424635876214.sh
########## STARTING STAGE: BUILD CLUSTER AUTOSCALER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.91jY1pKj39
+ cat
+ chmod +x /tmp/tmp.91jY1pKj39
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.91jY1pKj39 openshiftdevel:/tmp/tmp.91jY1pKj39
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.91jY1pKj39"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/kubernetes-autoscaler
+ sudo imagebuilder -f images/cluster-autoscaler/Dockerfile -t docker.io/openshift/origin-cluster-autoscaler:v4.0 .
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ...
--> Pulled 1/2 layers, 65% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/k8s.io/autoscaler
--> COPY . .
--> RUN go build -o cluster-autoscaler/cluster-autoscaler ./cluster-autoscaler
--> Image registry.svc.ci.openshift.org/openshift/origin-v4.0:base was not found, pulling ...
--> Pulled 2/4 layers, 60% complete
--> Pulled 3/4 layers, 81% complete
--> Pulled 4/4 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /usr/bin/
--> CMD /usr/bin/cluster-autoscaler
--> LABEL summary="Cluster Autoscaler for OpenShift and Kubernetes"
--> Committing changes to docker.io/openshift/origin-cluster-autoscaler:v4.0 ...
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD CLUSTER AUTOSCALER [00h 02m 24s] ##########
[workspace@2] $ /bin/bash /tmp/jenkins4794395534924421008.sh
########## STARTING STAGE: DEPLOY MACHINE API OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.nmSD0BllKL
+ cat
+ chmod +x /tmp/tmp.nmSD0BllKL
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.nmSD0BllKL openshiftdevel:/tmp/tmp.nmSD0BllKL
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.nmSD0BllKL"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/machine-api-operator
+ sudo imagebuilder -t docker.io/openshift/origin-machine-api-operator:v4.0.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/machine-api-operator
--> COPY . .
--> RUN NO_DOCKER=1 make build
./hack/go-build.sh machine-api-operator
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-524-g474e14e4)
./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-524-g474e14e4)
./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-524-g474e14e4)
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-healthcheck .
--> LABEL io.openshift.release.operator true
--> Committing changes to docker.io/openshift/origin-machine-api-operator:v4.0.0 ...
--> Done
+ sudo make deploy-kubemark
kustomize build config | kubectl apply -f -
namespace "kubemark-actuator" created
serviceaccount "kubemark" created
clusterrole.rbac.authorization.k8s.io "kubemark-actuator-role" created
clusterrolebinding.rbac.authorization.k8s.io "kubemark-actuator-rolebinding" created
configmap "deleteunreadynodes" created
deployment.apps "machineapi-kubemark-controllers" created
kustomize build | kubectl apply -f -
namespace "openshift-machine-api" created
customresourcedefinition.apiextensions.k8s.io "clusteroperators.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "featuregates.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinedisruptionbudgets.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinehealthchecks.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machines.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinesets.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "prometheusrules.monitoring.coreos.com" created
customresourcedefinition.apiextensions.k8s.io "servicemonitors.monitoring.coreos.com" created
serviceaccount "machine-api-controllers" created
serviceaccount "machine-api-operator" created
role.rbac.authorization.k8s.io "machine-api-controllers" created
role.rbac.authorization.k8s.io "machine-api-operator" created
role.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrole.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrole.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
rolebinding.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-operator" created
configmap "machine-api-operator-images" created
service "machine-api-operator" created
deployment.apps "machine-api-operator" created
clusteroperator.config.openshift.io "machine-api" created
kubectl apply -f config/kubemark-config-infra.yaml
customresourcedefinition.apiextensions.k8s.io "infrastructures.config.openshift.io" created
infrastructure.config.openshift.io "cluster" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY MACHINE API OPERATOR [00h 01m 15s] ##########
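Everything this stage creates is a plain Kubernetes API object, so the result can be inspected with kubectl. A minimal verification sketch using the names from the apply output above; the namespace of the kubemark controllers deployment is assumed to be the kubemark-actuator namespace created by the same overlay:

  # Sketch: spot-check the machine-api deployment after the apply above.
  kubectl -n kubemark-actuator get deployment machineapi-kubemark-controllers
  kubectl -n openshift-machine-api get deployment machine-api-operator
  kubectl get clusteroperator machine-api
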
[workspace@2] $ /bin/bash /tmp/jenkins1455367870770999506.sh
########## STARTING STAGE: DEPLOY CLUSTER AUTOSCALER OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.T8Y7fs5wLF
+ cat
+ chmod +x /tmp/tmp.T8Y7fs5wLF
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.T8Y7fs5wLF openshiftdevel:/tmp/tmp.T8Y7fs5wLF
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.T8Y7fs5wLF"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-autoscaler-operator/
+ sudo imagebuilder -t quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator
--> COPY . .
--> ENV NO_DOCKER=1
--> ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator
--> RUN unset VERSION && make build
go build  -ldflags "-X github.com/openshift/cluster-autoscaler-operator/pkg/version.Raw=v0.0.0-213-gf7ba475" -o "/go/bin/cluster-autoscaler-operator" "github.com/openshift/cluster-autoscaler-operator/cmd/manager"
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/
--> COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests
--> CMD ["/usr/bin/cluster-autoscaler-operator"]
--> LABEL io.openshift.release.operator true
--> Committing changes to quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 ...
--> Done
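The --> lines above imply the following two-stage Dockerfile; this is a reconstruction from the imagebuilder output, not the file checked into the repository:

# Reconstructed from the imagebuilder steps logged above (sketch, not the checked-in Dockerfile)
cat > Dockerfile.reconstructed <<'EOF'
FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 AS builder
WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator
COPY . .
ENV NO_DOCKER=1
ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator
RUN unset VERSION && make build

FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base
COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/
COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests
CMD ["/usr/bin/cluster-autoscaler-operator"]
LABEL io.openshift.release.operator true
EOF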
+ kustomize build
+ sudo kubectl apply -f -
customresourcedefinition.apiextensions.k8s.io "clusterautoscalers.autoscaling.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machineautoscalers.autoscaling.openshift.io" created
serviceaccount "cluster-autoscaler" created
serviceaccount "cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler" created
role.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
configmap "cluster-autoscaler-operator-ca" created
secret "cluster-autoscaler-operator-cert" created
service "cluster-autoscaler-operator" created
deployment.apps "cluster-autoscaler-operator" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER AUTOSCALER OPERATOR [00h 00m 46s] ##########
[workspace@2] $ /bin/bash /tmp/jenkins3328529090167326146.sh
########## STARTING STAGE: DEPLOY CLUSTER RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.idmrezUkLo
+ cat
+ chmod +x /tmp/tmp.idmrezUkLo
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.idmrezUkLo openshiftdevel:/tmp/tmp.idmrezUkLo
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.idmrezUkLo"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo kubectl apply -f examples/machine-set.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset" created
+ sudo kubectl apply -f examples/static-machine.yaml
machine.machine.openshift.io "minikube-static-machine" created
+ sudo kubectl apply -f examples/worker-machinesets.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-red" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-green" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-blue" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER RESOURCES [00h 00m 03s] ##########
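For reference, the MachineSets applied from examples/*.yaml above follow the standard machine.openshift.io/v1beta1 shape. The skeleton below is a sketch only: the example name, the namespace, and the empty kubemark providerSpec are assumptions, since the actual example files are not part of this log.

# Minimal MachineSet skeleton (sketch; name, namespace and providerSpec are assumptions)
cat <<'EOF' | kubectl apply -f -
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  name: kubemark-machineset-example
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machineset: kubemark-machineset-example
  template:
    metadata:
      labels:
        machine.openshift.io/cluster-api-machineset: kubemark-machineset-example
    spec:
      providerSpec: {}   # kubemark-specific provider settings omitted in this sketch
EOF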
[workspace@2] $ /bin/bash /tmp/jenkins7909159307379912109.sh
########## STARTING STAGE: INSTALL GO 1.10.1 ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.FajAuCES8l
+ cat
+ chmod +x /tmp/tmp.FajAuCES8l
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.FajAuCES8l openshiftdevel:/tmp/tmp.FajAuCES8l
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.FajAuCES8l"'
+ cd /home/origin
+ mkdir -p /home/origin/bin
+ curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x /home/origin/bin/gimme
+ gimme 1.10.1

unset GOOS;
unset GOARCH;
export GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64';
export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}";
go version >&2;

export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env"
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 08s] ##########
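The gimme steps above can be reproduced more compactly by eval-ing gimme's output instead of sourcing the generated env file; a sketch equivalent to what this stage does:

# Install and activate Go 1.10.1 via gimme (sketch)
curl -sL -o ./gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
chmod +x ./gimme
eval "$(./gimme 1.10.1)"
go version   # should report go1.10.1 linux/amd64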
[workspace@2] $ /bin/bash /tmp/jenkins131327965224125658.sh
########## STARTING STAGE: RUN E2E TESTS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.9CPLO4W9pF
+ cat
+ chmod +x /tmp/tmp.9CPLO4W9pF
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.9CPLO4W9pF openshiftdevel:/tmp/tmp.9CPLO4W9pF
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.9CPLO4W9pF"'
+ cd /home/origin
+ set +x
go version go1.10.1 linux/amd64
hack/e2e.sh
make[1]: Entering directory `/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg'
# Run operator tests first to preserve logs for troubleshooting test
# failures and flakes.
# Feature:Operator tests remove deployments, thus losing all the logs
# previously acquired.
hack/ci-integration.sh  -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" -ginkgo.failFast
=== RUN   TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1567596073
Will run 7 of 16 specs

[Feature:Operators] Machine API cluster operator status should 
  be available
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:53
I0904 11:21:13.296328   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Cluster autoscaler operator should 
  reject invalid ClusterAutoscaler resources early via webhook
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:33
I0904 11:21:13.327831   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Cluster autoscaler operator should 
  reject invalid MachineAutoscaler resources early via webhook
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:49
I0904 11:21:13.353805   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
•S
------------------------------
[Feature:Operators] Cluster autoscaler operator deployment should 
  be available
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:79
I0904 11:21:13.368052   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 11:21:13.390114   31231 deloyment.go:58] Deployment "cluster-autoscaler-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Machine API operator deployment should 
  be available
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:18
I0904 11:21:13.390201   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 11:21:13.402035   31231 deloyment.go:58] Deployment "machine-api-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Machine API operator deployment should 
  reconcile controllers deployment
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:25
I0904 11:21:13.402146   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking deployment "machine-api-controllers" is available
I0904 11:21:13.415492   31231 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
STEP: deleting deployment "machine-api-controllers"
STEP: checking deployment "machine-api-controllers" is available again
E0904 11:21:13.424132   31231 deloyment.go:25] Error querying api for Deployment object "machine-api-controllers": deployments.apps "machine-api-controllers" not found, retrying...
E0904 11:21:14.427695   31231 deloyment.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1)
I0904 11:21:15.430997   31231 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Cluster autoscaler cluster operator status should 
  be available
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:90
I0904 11:21:15.431078   31231 framework.go:406] >>> kubeConfig: /root/.kube/config
•SSSSSSSS
Ran 7 of 16 Specs in 2.149 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (2.15s)
PASS
ok  	github.com/openshift/cluster-api-actuator-pkg/pkg/e2e	2.195s
hack/ci-integration.sh  -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators|TechPreview" -ginkgo.failFast -ginkgo.seed=1
=== RUN   TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1
Will run 7 of 16 specs

SSSSSSSS
------------------------------
[Feature:Machines] Autoscaler should 
  scale up and down
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:233
I0904 11:21:18.541875   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 11:21:18.546461   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 11:21:18.569071   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Getting existing machinesets
STEP: Getting existing machines
STEP: Getting existing nodes
I0904 11:21:18.581530   31689 autoscaler.go:283] Have 4 existing machinesets
I0904 11:21:18.581553   31689 autoscaler.go:284] Have 5 existing machines
I0904 11:21:18.581560   31689 autoscaler.go:285] Have 5 existing nodes
STEP: Creating 3 transient machinesets
STEP: [15m0s remaining] Waiting for nodes to be Ready in 3 transient machinesets
E0904 11:21:18.603947   31689 utils.go:157] Machine "e2e-1da80-w-0-6c589" has no NodeRef
STEP: [14m57s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0904 11:21:21.698815   31689 utils.go:165] Machine "e2e-1da80-w-0-6c589" is backing node "4298df12-a5cb-4a86-a580-159eb479c4a5"
I0904 11:21:21.698846   31689 utils.go:149] MachineSet "e2e-1da80-w-0" have 1 nodes
I0904 11:21:21.743401   31689 utils.go:165] Machine "e2e-1da80-w-1-khf5z" is backing node "6099b7bf-6c51-4bb5-99d6-475c3a0cdb75"
I0904 11:21:21.743432   31689 utils.go:149] MachineSet "e2e-1da80-w-1" have 1 nodes
E0904 11:21:21.754308   31689 utils.go:157] Machine "e2e-1da80-w-2-f2xrd" has no NodeRef
STEP: [14m54s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0904 11:21:24.762978   31689 utils.go:165] Machine "e2e-1da80-w-0-6c589" is backing node "4298df12-a5cb-4a86-a580-159eb479c4a5"
I0904 11:21:24.763002   31689 utils.go:149] MachineSet "e2e-1da80-w-0" have 1 nodes
I0904 11:21:24.768620   31689 utils.go:165] Machine "e2e-1da80-w-1-khf5z" is backing node "6099b7bf-6c51-4bb5-99d6-475c3a0cdb75"
I0904 11:21:24.768646   31689 utils.go:149] MachineSet "e2e-1da80-w-1" have 1 nodes
I0904 11:21:24.774126   31689 utils.go:165] Machine "e2e-1da80-w-2-f2xrd" is backing node "22efef53-29f9-4f0f-a3f7-af3147b31bdd"
I0904 11:21:24.774151   31689 utils.go:149] MachineSet "e2e-1da80-w-2" have 1 nodes
I0904 11:21:24.774189   31689 utils.go:177] Node "4298df12-a5cb-4a86-a580-159eb479c4a5" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 11:21:24.774270   31689 utils.go:177] Node "6099b7bf-6c51-4bb5-99d6-475c3a0cdb75" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:21:23 +0000 UTC 2019-09-04 11:21:21 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 11:21:24.774379   31689 utils.go:177] Node "22efef53-29f9-4f0f-a3f7-af3147b31bdd" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:21:24 +0000 UTC 2019-09-04 11:21:22 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:21:24 +0000 UTC 2019-09-04 11:21:22 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:21:24 +0000 UTC 2019-09-04 11:21:22 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:21:24 +0000 UTC 2019-09-04 11:21:22 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:21:24 +0000 UTC 2019-09-04 11:21:22 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: Getting nodes
STEP: Creating 3 machineautoscalers
I0904 11:21:24.777723   31689 autoscaler.go:337] Create MachineAutoscaler backed by MachineSet kube-system/e2e-1da80-w-0 - min:1, max:2
I0904 11:21:24.785138   31689 autoscaler.go:337] Create MachineAutoscaler backed by MachineSet kube-system/e2e-1da80-w-1 - min:1, max:2
I0904 11:21:24.790208   31689 autoscaler.go:337] Create MachineAutoscaler backed by MachineSet kube-system/e2e-1da80-w-2 - min:1, max:2
STEP: Creating ClusterAutoscaler configured with maxNodesTotal:10
STEP: Deriving Memory capacity from machine "kubemark-actuator-testing-machineset"
I0904 11:21:24.905663   31689 autoscaler.go:374] Memory capacity of worker node "166234dd-4999-47ec-b113-6931509cece9" is 3840Mi
STEP: Creating scale-out workload: jobs: 11, memory: 2818572300
I0904 11:21:24.932305   31689 autoscaler.go:396] [15m0s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0904 11:21:25.920519   31689 autoscaler.go:358] cluster-autoscaler: cluster-autoscaler-default-598c649f66-7rbkx became leader
I0904 11:21:27.932553   31689 autoscaler.go:396] [14m57s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0904 11:21:30.933197   31689 autoscaler.go:396] [14m54s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0904 11:21:33.933430   31689 autoscaler.go:396] [14m51s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0904 11:21:36.077048   31689 autoscaler.go:358] cluster-autoscaler-status: Max total nodes in cluster reached: 10
I0904 11:21:36.078816   31689 autoscaler.go:358] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-1da80-w-1 size to 2
I0904 11:21:36.089127   31689 autoscaler.go:358] workload-lznjj: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.095772   31689 autoscaler.go:358] cluster-autoscaler-status: Scale-up: group kube-system/e2e-1da80-w-1 size set to 2
I0904 11:21:36.102734   31689 autoscaler.go:358] workload-g6j9j: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.107747   31689 autoscaler.go:358] workload-v97gq: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.111364   31689 autoscaler.go:358] workload-thqdf: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.119918   31689 autoscaler.go:358] workload-qhq29: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.123795   31689 autoscaler.go:358] workload-72l8n: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.126080   31689 autoscaler.go:358] workload-vqh28: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.276609   31689 autoscaler.go:358] workload-njgzw: pod triggered scale-up: [{kube-system/e2e-1da80-w-1 1->2 (max: 2)}]
I0904 11:21:36.933645   31689 autoscaler.go:396] [14m48s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0904 11:21:39.934653   31689 autoscaler.go:396] [14m45s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0904 11:21:42.934882   31689 autoscaler.go:396] [14m42s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0904 11:21:45.935131   31689 autoscaler.go:396] [14m39s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0904 11:21:46.102392   31689 autoscaler.go:358] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-1da80-w-0 size to 2
I0904 11:21:46.111248   31689 autoscaler.go:358] workload-v97gq: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.115274   31689 autoscaler.go:358] cluster-autoscaler-status: Scale-up: group kube-system/e2e-1da80-w-0 size set to 2
I0904 11:21:46.117297   31689 autoscaler.go:358] workload-qhq29: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.121886   31689 autoscaler.go:358] workload-vqh28: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.127055   31689 autoscaler.go:358] workload-72l8n: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.129344   31689 autoscaler.go:358] workload-thqdf: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.131534   31689 autoscaler.go:358] workload-lznjj: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:46.136155   31689 autoscaler.go:358] workload-g6j9j: pod triggered scale-up: [{kube-system/e2e-1da80-w-0 1->2 (max: 2)}]
I0904 11:21:48.935321   31689 autoscaler.go:396] [14m36s remaining] Expecting 2 "ScaledUpGroup" events; observed 2
I0904 11:21:48.935914   31689 autoscaler.go:411] [1m0s remaining] Waiting for cluster-autoscaler to generate a "MaxNodesTotalReached" event; observed 1
I0904 11:21:48.935931   31689 autoscaler.go:419] [1m0s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:21:51.936841   31689 autoscaler.go:419] [57s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:21:54.937572   31689 autoscaler.go:419] [54s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:21:57.937822   31689 autoscaler.go:419] [51s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:00.938042   31689 autoscaler.go:419] [48s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:03.939203   31689 autoscaler.go:419] [45s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:06.939343   31689 autoscaler.go:419] [42s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:09.939580   31689 autoscaler.go:419] [39s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:12.939816   31689 autoscaler.go:419] [36s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:15.940083   31689 autoscaler.go:419] [33s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:18.940259   31689 autoscaler.go:419] [30s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:21.940479   31689 autoscaler.go:419] [27s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:24.940753   31689 autoscaler.go:419] [24s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:27.940956   31689 autoscaler.go:419] [21s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:30.941213   31689 autoscaler.go:419] [18s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:33.941482   31689 autoscaler.go:419] [15s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:36.941719   31689 autoscaler.go:419] [12s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:39.942283   31689 autoscaler.go:419] [9s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:42.942517   31689 autoscaler.go:419] [6s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0904 11:22:45.942788   31689 autoscaler.go:419] [3s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
STEP: Deleting workload
I0904 11:22:48.947960   31689 autoscaler.go:433] [15m0s remaining] Expecting 2 "ScaleDownEmpty" events; observed 2
STEP: Scaling transient machinesets to zero
I0904 11:22:48.948005   31689 autoscaler.go:440] Scaling transient machineset "e2e-1da80-w-0" to zero
I0904 11:22:48.963142   31689 autoscaler.go:440] Scaling transient machineset "e2e-1da80-w-1" to zero
I0904 11:22:48.985850   31689 autoscaler.go:440] Scaling transient machineset "e2e-1da80-w-2" to zero
STEP: Waiting for scaled up nodes to be deleted
I0904 11:22:49.029485   31689 autoscaler.go:457] [15m0s remaining] Waiting for cluster to reach original node count of 5; currently have 10
I0904 11:22:52.034833   31689 autoscaler.go:457] [14m57s remaining] Waiting for cluster to reach original node count of 5; currently have 10
I0904 11:22:55.037992   31689 autoscaler.go:457] [14m54s remaining] Waiting for cluster to reach original node count of 5; currently have 5
STEP: Waiting for scaled up machines to be deleted
I0904 11:22:55.041671   31689 autoscaler.go:467] [15m0s remaining] Waiting for cluster to reach original machine count of 5; currently have 5

• [SLOW TEST:96.571 seconds]
[Feature:Machines] Autoscaler should
/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:232
  scale up and down
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:233
------------------------------
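The spec above drives the operator through the two CRs it manages: a cluster-scoped ClusterAutoscaler (named "default", capped at maxNodesTotal 10 per the log) and one MachineAutoscaler per transient MachineSet with min 1 / max 2. The sketch below shows equivalent resources; the MachineAutoscaler name, the target MachineSet name and the namespace are placeholders, and the API versions are assumed to be the ones served by the operator in this release.

# Sketch of the autoscaler resources exercised by the scale up/down test
cat <<'EOF' | kubectl apply -f -
apiVersion: autoscaling.openshift.io/v1
kind: ClusterAutoscaler
metadata:
  name: default
spec:
  resourceLimits:
    maxNodesTotal: 10
---
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: example-machineautoscaler
  namespace: kube-system
spec:
  minReplicas: 1
  maxReplicas: 2
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: example-machineset
EOF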
S
------------------------------
[Feature:Machines] Managed cluster should 
  have machines linked with nodes
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:136
I0904 11:22:55.112756   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
I0904 11:22:55.139287   31689 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 11:22:55.139322   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-8drlz" is linked to node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:22:55.139337   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-kx7wv" is linked to node "cbe9202c-babb-4a83-bd70-8f21d75df033"
I0904 11:22:55.139349   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-rchkh" is linked to node "166234dd-4999-47ec-b113-6931509cece9"
I0904 11:22:55.139362   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-kpmfw" is linked to node "d1f145c4-1ed2-493f-97af-c24f5256cf46"
I0904 11:22:55.139374   31689 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
•
------------------------------
[Feature:Machines] Managed cluster should 
  have ability to additively reconcile taints from machine to nodes
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:145
I0904 11:22:55.139440   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: getting machine "kubemark-actuator-testing-machineset-8drlz"
I0904 11:22:55.176481   31689 utils.go:165] Machine "kubemark-actuator-testing-machineset-8drlz" is backing node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
STEP: getting the backed node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
STEP: updating node "595f4b23-c3ad-4106-bf5c-21ec235eb424" with taint: {not-from-machine true NoSchedule <nil>}
STEP: updating machine "kubemark-actuator-testing-machineset-8drlz" with taint: {from-machine-573c4d4f-cf06-11e9-95fe-0ac9a22f5366 true NoSchedule <nil>}
I0904 11:22:55.197529   31689 infra.go:184] Getting node from machine again for verification of taints
I0904 11:22:55.207514   31689 utils.go:165] Machine "kubemark-actuator-testing-machineset-8drlz" is backing node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:22:55.207559   31689 infra.go:194] Expected : map[from-machine-573c4d4f-cf06-11e9-95fe-0ac9a22f5366:{} not-from-machine:{}], observed map[kubemark:{} not-from-machine:{} from-machine-573c4d4f-cf06-11e9-95fe-0ac9a22f5366:{}] , difference map[], 
STEP: Getting the latest version of the original machine
STEP: Setting back the original machine taints
STEP: Getting the latest version of the node
I0904 11:22:55.224880   31689 utils.go:165] Machine "kubemark-actuator-testing-machineset-8drlz" is backing node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
STEP: Setting back the original node taints
•
------------------------------
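The taint-reconciliation spec above patches a taint onto the Machine object and expects it to appear additively on the backing node, alongside any taints already there (such as the kubemark taint visible in the observed map). A hand-run equivalent, with the machine name, namespace and taint key as placeholders:

# Add a NoSchedule taint to a machine and check it propagates to its node (sketch)
kubectl -n kube-system patch machine <machine-name> --type merge \
  -p '{"spec":{"taints":[{"key":"from-machine-example","effect":"NoSchedule"}]}}'
kubectl get node <backing-node-name> -o jsonpath='{.spec.taints}'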
[Feature:Machines] Managed cluster should 
  recover from deleted worker machines
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
I0904 11:22:55.230511   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking initial cluster state
I0904 11:22:55.247452   31689 utils.go:87] Cluster size is 5 nodes
I0904 11:22:55.247479   31689 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0904 11:22:55.250905   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:22:55.250933   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:22:55.250943   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:22:55.250952   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:22:55.253923   31689 utils.go:231] Node "166234dd-4999-47ec-b113-6931509cece9". Ready: true. Unschedulable: false
I0904 11:22:55.253946   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:22:55.253956   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:22:55.253964   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:22:55.253972   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:22:55.257009   31689 utils.go:87] Cluster size is 5 nodes
I0904 11:22:55.257033   31689 utils.go:257] waiting for all nodes to be ready
I0904 11:22:55.263452   31689 utils.go:262] waiting for all nodes to be schedulable
I0904 11:22:55.266890   31689 utils.go:290] [remaining 1m0s] Node "166234dd-4999-47ec-b113-6931509cece9" is schedulable
I0904 11:22:55.266919   31689 utils.go:290] [remaining 1m0s] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424" is schedulable
I0904 11:22:55.266931   31689 utils.go:290] [remaining 1m0s] Node "cbe9202c-babb-4a83-bd70-8f21d75df033" is schedulable
I0904 11:22:55.266941   31689 utils.go:290] [remaining 1m0s] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46" is schedulable
I0904 11:22:55.266950   31689 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 11:22:55.266959   31689 utils.go:267] waiting for each node to be backed by a machine
I0904 11:22:55.273183   31689 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 11:22:55.273214   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-8drlz" is linked to node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:22:55.273230   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-kx7wv" is linked to node "cbe9202c-babb-4a83-bd70-8f21d75df033"
I0904 11:22:55.273245   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-rchkh" is linked to node "166234dd-4999-47ec-b113-6931509cece9"
I0904 11:22:55.273261   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-kpmfw" is linked to node "d1f145c4-1ed2-493f-97af-c24f5256cf46"
I0904 11:22:55.273274   31689 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
STEP: getting worker node
STEP: deleting machine object "kubemark-actuator-testing-machineset-green-rchkh"
STEP: waiting for node object "166234dd-4999-47ec-b113-6931509cece9" to go away
I0904 11:22:55.289102   31689 infra.go:255] Node "166234dd-4999-47ec-b113-6931509cece9" still exists. Node conditions are: [{OutOfDisk False 2019-09-04 11:22:53 +0000 UTC 2019-09-04 11:20:19 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:22:53 +0000 UTC 2019-09-04 11:20:19 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:22:53 +0000 UTC 2019-09-04 11:20:19 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:22:53 +0000 UTC 2019-09-04 11:20:19 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:22:53 +0000 UTC 2019-09-04 11:20:19 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: waiting for new node object to come up
I0904 11:23:00.293878   31689 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0904 11:23:00.296894   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:23:00.296912   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:23:00.296919   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:23:00.296924   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:23:00.299793   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:23:00.299815   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:23:00.299821   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:23:00.299826   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:23:00.299831   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:23:00.302488   31689 utils.go:87] Cluster size is 5 nodes
I0904 11:23:00.302511   31689 utils.go:257] waiting for all nodes to be ready
I0904 11:23:00.311154   31689 utils.go:262] waiting for all nodes to be schedulable
I0904 11:23:00.323120   31689 utils.go:290] [remaining 1m0s] Node "4169660f-ac78-4b87-b90f-0358fa5efca9" is schedulable
I0904 11:23:00.323152   31689 utils.go:290] [remaining 1m0s] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424" is schedulable
I0904 11:23:00.323182   31689 utils.go:290] [remaining 1m0s] Node "cbe9202c-babb-4a83-bd70-8f21d75df033" is schedulable
I0904 11:23:00.323198   31689 utils.go:290] [remaining 1m0s] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46" is schedulable
I0904 11:23:00.323208   31689 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 11:23:00.323219   31689 utils.go:267] waiting for each node to be backed by a machine
I0904 11:23:00.330953   31689 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 11:23:00.330985   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-8drlz" is linked to node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:23:00.331002   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-kx7wv" is linked to node "cbe9202c-babb-4a83-bd70-8f21d75df033"
I0904 11:23:00.331015   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2twbz" is linked to node "4169660f-ac78-4b87-b90f-0358fa5efca9"
I0904 11:23:00.331028   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-kpmfw" is linked to node "d1f145c4-1ed2-493f-97af-c24f5256cf46"
I0904 11:23:00.331040   31689 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"

• [SLOW TEST:5.101 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  recover from deleted worker machines
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
------------------------------
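The recovery spec above deletes a MachineSet-backed Machine and waits for the controller to create a replacement machine and node. Reproduced by hand it looks roughly like this (the machine name is the one deleted in the log; the namespace is assumed to match the transient MachineSets):

# Delete a worker machine and watch the MachineSet replace it (sketch)
kubectl -n kube-system delete machine kubemark-actuator-testing-machineset-green-rchkh
kubectl -n kube-system get machines -w   # a new -green- machine should appear
kubectl get nodes -w                     # the old node disappears, a replacement becomes Ready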
[Feature:Machines] Managed cluster should 
  grow and decrease when scaling different machineSets simultaneously
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
I0904 11:23:00.331134   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0904 11:23:00.355220   31689 utils.go:87] Cluster size is 5 nodes
STEP: getting worker machineSets
I0904 11:23:00.361022   31689 infra.go:297] Creating transient MachineSet "e2e-5a525-w-0"
I0904 11:23:00.368505   31689 infra.go:297] Creating transient MachineSet "e2e-5a525-w-1"
STEP: scaling "e2e-5a525-w-0" from 0 to 2 replicas
I0904 11:23:00.374122   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-5a525-w-1" from 0 to 2 replicas
I0904 11:23:00.407243   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
E0904 11:23:00.447542   31689 utils.go:157] Machine "e2e-5a525-w-0-bgz4t" has no NodeRef
I0904 11:23:05.458397   31689 utils.go:165] Machine "e2e-5a525-w-0-bgz4t" is backing node "3c21bb7a-4f3e-444a-b521-dd4dbeef8007"
I0904 11:23:05.462166   31689 utils.go:165] Machine "e2e-5a525-w-0-klz8g" is backing node "e969fe05-b8d5-427b-8cc1-86207add2106"
I0904 11:23:05.462192   31689 utils.go:149] MachineSet "e2e-5a525-w-0" have 2 nodes
E0904 11:23:05.467615   31689 utils.go:157] Machine "e2e-5a525-w-1-bkn26" has no NodeRef
I0904 11:23:10.475346   31689 utils.go:165] Machine "e2e-5a525-w-0-bgz4t" is backing node "3c21bb7a-4f3e-444a-b521-dd4dbeef8007"
I0904 11:23:10.477765   31689 utils.go:165] Machine "e2e-5a525-w-0-klz8g" is backing node "e969fe05-b8d5-427b-8cc1-86207add2106"
I0904 11:23:10.477789   31689 utils.go:149] MachineSet "e2e-5a525-w-0" have 2 nodes
I0904 11:23:10.483570   31689 utils.go:165] Machine "e2e-5a525-w-1-bkn26" is backing node "8084551b-1aa4-4ec3-a41c-e09f6c3b541c"
I0904 11:23:10.485195   31689 utils.go:165] Machine "e2e-5a525-w-1-hgxsw" is backing node "4a823af9-c90b-4bae-a66a-afee939f78ea"
I0904 11:23:10.485226   31689 utils.go:149] MachineSet "e2e-5a525-w-1" have 2 nodes
I0904 11:23:10.485237   31689 utils.go:177] Node "3c21bb7a-4f3e-444a-b521-dd4dbeef8007" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:03 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:03 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:03 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:03 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:03 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 11:23:10.485294   31689 utils.go:177] Node "e969fe05-b8d5-427b-8cc1-86207add2106" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:23:10 +0000 UTC 2019-09-04 11:23:04 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:23:10 +0000 UTC 2019-09-04 11:23:04 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:23:10 +0000 UTC 2019-09-04 11:23:04 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:23:10 +0000 UTC 2019-09-04 11:23:04 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:23:10 +0000 UTC 2019-09-04 11:23:04 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 11:23:10.485335   31689 utils.go:177] Node "8084551b-1aa4-4ec3-a41c-e09f6c3b541c" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletReady kubelet is posting ready status}]
I0904 11:23:10.485372   31689 utils.go:177] Node "4a823af9-c90b-4bae-a66a-afee939f78ea" is ready. Conditions are: [{OutOfDisk False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-04 11:23:09 +0000 UTC 2019-09-04 11:23:05 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: scaling "e2e-5a525-w-0" from 2 to 0 replicas
I0904 11:23:10.485412   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-5a525-w-1" from 2 to 0 replicas
I0904 11:23:10.502654   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: waiting for cluster to get back to original size. Final size should be 5 nodes
I0904 11:23:10.529022   31689 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0904 11:23:10.540856   31689 utils.go:99] MachineSet "e2e-5a525-w-0" replicas 0. Ready: 0, available 0
I0904 11:23:10.540893   31689 utils.go:99] MachineSet "e2e-5a525-w-1" replicas 0. Ready: 2, available 2
I0904 11:23:10.540903   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:23:10.540912   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:23:10.540922   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:23:10.540930   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:23:10.552258   31689 utils.go:231] Node "3c21bb7a-4f3e-444a-b521-dd4dbeef8007". Ready: true. Unschedulable: false
I0904 11:23:10.552286   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:23:10.552296   31689 utils.go:231] Node "4a823af9-c90b-4bae-a66a-afee939f78ea". Ready: true. Unschedulable: false
I0904 11:23:10.552304   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:23:10.552312   31689 utils.go:231] Node "8084551b-1aa4-4ec3-a41c-e09f6c3b541c". Ready: true. Unschedulable: false
I0904 11:23:10.552326   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:23:10.552335   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:23:10.552343   31689 utils.go:231] Node "e969fe05-b8d5-427b-8cc1-86207add2106". Ready: true. Unschedulable: true
I0904 11:23:10.552351   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:23:10.560662   31689 utils.go:87] Cluster size is 9 nodes
I0904 11:23:15.560914   31689 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0904 11:23:15.564364   31689 utils.go:99] MachineSet "e2e-5a525-w-0" replicas 0. Ready: 0, available 0
I0904 11:23:15.564392   31689 utils.go:99] MachineSet "e2e-5a525-w-1" replicas 0. Ready: 0, available 0
I0904 11:23:15.564402   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:23:15.564408   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:23:15.564413   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:23:15.564419   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:23:15.567109   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:23:15.567130   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:23:15.567136   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:23:15.567141   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:23:15.567147   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:23:15.569725   31689 utils.go:87] Cluster size is 5 nodes
I0904 11:23:15.569754   31689 utils.go:257] waiting for all nodes to be ready
I0904 11:23:15.572226   31689 utils.go:262] waiting for all nodes to be schedulable
I0904 11:23:15.574889   31689 utils.go:290] [remaining 1m0s] Node "4169660f-ac78-4b87-b90f-0358fa5efca9" is schedulable
I0904 11:23:15.574915   31689 utils.go:290] [remaining 1m0s] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424" is schedulable
I0904 11:23:15.574926   31689 utils.go:290] [remaining 1m0s] Node "cbe9202c-babb-4a83-bd70-8f21d75df033" is schedulable
I0904 11:23:15.574936   31689 utils.go:290] [remaining 1m0s] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46" is schedulable
I0904 11:23:15.574945   31689 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 11:23:15.574954   31689 utils.go:267] waiting for each node to be backed by a machine
I0904 11:23:15.580408   31689 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 11:23:15.580434   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-8drlz" is linked to node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:23:15.580451   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-kx7wv" is linked to node "cbe9202c-babb-4a83-bd70-8f21d75df033"
I0904 11:23:15.580466   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2twbz" is linked to node "4169660f-ac78-4b87-b90f-0358fa5efca9"
I0904 11:23:15.580486   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-kpmfw" is linked to node "d1f145c4-1ed2-493f-97af-c24f5256cf46"
I0904 11:23:15.580494   31689 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"

• [SLOW TEST:15.257 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  grow and decrease when scaling different machineSets simultaneously
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
------------------------------
[Feature:Machines] Managed cluster should 
  drain node before removing machine resource
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
I0904 11:23:15.588118   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0904 11:23:15.604141   31689 utils.go:87] Cluster size is 5 nodes
STEP: Taking the first worker machineset (assuming only worker machines are backed by machinesets)
STEP: Creating two new machines, one for node about to be drained, other for moving workload from drained node
STEP: Waiting until both new nodes are ready
E0904 11:23:15.616829   31689 utils.go:342] [remaining 15m0s] Expecting 2 nodes with map[string]string{"node-role.kubernetes.io/worker":"", "node-draining-test":"1da1871e-cf06-11e9-95fe-0ac9a22f5366"} labels in Ready state, got 0
I0904 11:23:20.621042   31689 utils.go:346] [14m55s remaining] Expected number (2) of nodes with map[node-role.kubernetes.io/worker: node-draining-test:1da1871e-cf06-11e9-95fe-0ac9a22f5366] label in Ready state found
STEP: Creating RC with workload
STEP: Creating PDB for RC
STEP: Wait until all replicas are ready
I0904 11:23:20.667296   31689 utils.go:396] [15m0s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 0
I0904 11:23:25.671679   31689 utils.go:396] [14m55s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 20
I0904 11:23:30.669459   31689 utils.go:399] [14m50s remaining] Waiting for RC ready replicas, ReadyReplicas: 20, Replicas: 20
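The pod dumps that follow show what the drain workload looks like: 20 busybox "sleep 10h" replicas pinned to the node under test via a nodeSelector, with a PodDisruptionBudget guarding them. A rough equivalent is sketched below; labels, requests and tolerations are taken from the dumps, while the PDB's minAvailable value and the per-run node-draining-test label value are assumptions.

# Sketch of the drain-test workload: a pinned RC plus a PDB guarding it
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: ReplicationController
metadata:
  name: pdb-workload
spec:
  replicas: 20
  selector:
    app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      nodeSelector:
        node-role.kubernetes.io/worker: ""
        node-draining-test: "example"       # the test uses a per-run UUID here
      tolerations:
      - key: kubemark
        operator: Exists
      containers:
      - name: work
        image: busybox
        command: ["sleep", "10h"]
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: pdb-workload
spec:
  minAvailable: 19       # assumption; the budget actually used by the test is not in this log
  selector:
    matchLabels:
      app: nginx
EOF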
I0904 11:23:30.677461   31689 utils.go:416] POD #0/20: {
  "metadata": {
    "name": "pdb-workload-4w47v",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-4w47v",
    "uid": "666db241-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3873",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.98.148.170",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://4e82b8628edddfd9"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.677674   31689 utils.go:416] POD #1/20: {
  "metadata": {
    "name": "pdb-workload-5c7cd",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-5c7cd",
    "uid": "666af806-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3884",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.180.138.248",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://16188ab58ccb3d61"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.677835   31689 utils.go:416] POD #2/20: {
  "metadata": {
    "name": "pdb-workload-5f54q",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-5f54q",
    "uid": "666b5e57-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3862",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.21.228.50",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://dd6dc66fa6768e97"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.677963   31689 utils.go:416] POD #3/20: {
  "metadata": {
    "name": "pdb-workload-5hjx4",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-5hjx4",
    "uid": "66691cea-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3867",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.49.69.160",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://c89d391cdf8a2e5a"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678130   31689 utils.go:416] POD #4/20: {
  "metadata": {
    "name": "pdb-workload-86g26",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-86g26",
    "uid": "666dc0e0-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3904",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.152.63.108",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://a797ebfbc27fc293"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678296   31689 utils.go:416] POD #5/20: {
  "metadata": {
    "name": "pdb-workload-8lrnl",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-8lrnl",
    "uid": "666da5ad-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3935",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.36.74.157",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://ddaff08aa04da5df"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678453   31689 utils.go:416] POD #6/20: {
  "metadata": {
    "name": "pdb-workload-b8g8h",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-b8g8h",
    "uid": "666db8b7-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3876",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.109.5.97",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://d049e49c28167cd0"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678600   31689 utils.go:416] POD #7/20: {
  "metadata": {
    "name": "pdb-workload-bpp8w",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-bpp8w",
    "uid": "666b2185-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3842",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:25Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.184.204.15",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://8d78eeb9878a3d69"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678758   31689 utils.go:416] POD #8/20: {
  "metadata": {
    "name": "pdb-workload-dd4l5",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-dd4l5",
    "uid": "666a0cea-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3915",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.129.247.243",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://cbfbb96d5dfbfcf5"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.678900   31689 utils.go:416] POD #9/20: {
  "metadata": {
    "name": "pdb-workload-ddklf",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-ddklf",
    "uid": "66704ea4-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3901",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.160.50.61",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://d325d4677e5748da"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679066   31689 utils.go:416] POD #10/20: {
  "metadata": {
    "name": "pdb-workload-ddnwb",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-ddnwb",
    "uid": "666d4106-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3912",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.221.138.233",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://3a5f0a545c728810"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679214   31689 utils.go:416] POD #11/20: {
  "metadata": {
    "name": "pdb-workload-dhrnf",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-dhrnf",
    "uid": "6670280e-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3945",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:28Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.147.143.34",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://8c84c4854d59a4f2"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679344   31689 utils.go:416] POD #12/20: {
  "metadata": {
    "name": "pdb-workload-f2tr2",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-f2tr2",
    "uid": "66703d4d-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3942",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:28Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.119.0.192",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://ee7b4f2c7a83f40e"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679469   31689 utils.go:416] POD #13/20: {
  "metadata": {
    "name": "pdb-workload-f7bz4",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-f7bz4",
    "uid": "666ff6dd-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3878",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.117.4.9",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://7b65b80630451f0d"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679593   31689 utils.go:416] POD #14/20: {
  "metadata": {
    "name": "pdb-workload-jxgnk",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-jxgnk",
    "uid": "666a0ca1-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3887",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.49.175.248",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://96c603a0a87fac0c"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679713   31689 utils.go:416] POD #15/20: {
  "metadata": {
    "name": "pdb-workload-rdwbj",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-rdwbj",
    "uid": "666d8ce4-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3907",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.207.3.207",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:26Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://8f334d3bd706bdc5"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.679856   31689 utils.go:416] POD #16/20: {
  "metadata": {
    "name": "pdb-workload-ssc4q",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-ssc4q",
    "uid": "666dc085-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3918",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.147.245.166",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://517068b0ab704408"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.680017   31689 utils.go:416] POD #17/20: {
  "metadata": {
    "name": "pdb-workload-t9wqs",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-t9wqs",
    "uid": "666b1170-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3898",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "cae9ea51-ebbf-486e-a863-4da5f73358d7",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.204.209.11",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://1e12391546c927c3"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.680194   31689 utils.go:416] POD #18/20: {
  "metadata": {
    "name": "pdb-workload-wz8rf",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-wz8rf",
    "uid": "667060ae-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3870",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.72.198.79",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://bebcdfa74f538be0"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0904 11:23:30.680377   31689 utils.go:416] POD #19/20: {
  "metadata": {
    "name": "pdb-workload-z5dmt",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-z5dmt",
    "uid": "666db6ff-cf06-11e9-9d68-0ac9a22f5366",
    "resourceVersion": "3881",
    "creationTimestamp": "2019-09-04T11:23:20Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "66676fbe-cf06-11e9-9d68-0ac9a22f5366",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-ffdcj",
        "secret": {
          "secretName": "default-token-ffdcj",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-ffdcj",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "1da1871e-cf06-11e9-95fe-0ac9a22f5366",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-04T11:23:20Z"
      }
    ],
    "hostIP": "172.17.0.15",
    "podIP": "10.255.10.70",
    "startTime": "2019-09-04T11:23:20Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-04T11:23:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://89b30c15b51b295a"
      }
    ],
    "qosClass": "Burstable"
  }
}
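The twenty "POD #N/20" dumps above are emitted by the test harness itself (utils.go:416, via the Go client). As a purely illustrative kubectl equivalent for inspecting the same pods, assuming kubectl access to this kubemark cluster and reusing the node name from the dumps:

# List the pdb-workload pods currently scheduled to the node that is about to be drained.
NODE="bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
kubectl get pods -n default -l app=nginx \
  --field-selector spec.nodeName="${NODE}" -o wide

# Full objects, comparable to the "POD #N/20" JSON printed above.
kubectl get pods -n default -l app=nginx \
  --field-selector spec.nodeName="${NODE}" -o json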
STEP: Delete machine to trigger node draining
STEP: Observing and verifying node draining
E0904 11:23:30.696664   31689 utils.go:451] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is expected to be marked as unschedulable, but it is not
I0904 11:23:35.707191   31689 utils.go:455] [remaining 14m55s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:23:35.721886   31689 utils.go:474] [remaining 14m55s] Have 9 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:23:35.723801   31689 utils.go:490] [remaining 14m55s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:23:35.723826   31689 utils.go:500] [remaining 14m55s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 9
I0904 11:23:40.705056   31689 utils.go:455] [remaining 14m50s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:23:40.719795   31689 utils.go:474] [remaining 14m50s] Have 8 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:23:40.721745   31689 utils.go:490] [remaining 14m50s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:23:40.721770   31689 utils.go:500] [remaining 14m50s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 8
I0904 11:23:45.701516   31689 utils.go:455] [remaining 14m45s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:23:45.709821   31689 utils.go:474] [remaining 14m45s] Have 7 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:23:45.715030   31689 utils.go:490] [remaining 14m45s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:23:45.715056   31689 utils.go:500] [remaining 14m45s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 7
I0904 11:23:50.700780   31689 utils.go:455] [remaining 14m40s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:23:50.706789   31689 utils.go:474] [remaining 14m40s] Have 6 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:23:50.708448   31689 utils.go:490] [remaining 14m40s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:23:50.708476   31689 utils.go:500] [remaining 14m40s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 6
I0904 11:23:55.700992   31689 utils.go:455] [remaining 14m35s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:23:55.706970   31689 utils.go:474] [remaining 14m35s] Have 5 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:23:55.708685   31689 utils.go:490] [remaining 14m35s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:23:55.708724   31689 utils.go:500] [remaining 14m35s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 5
I0904 11:24:00.702049   31689 utils.go:455] [remaining 14m30s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:24:00.708927   31689 utils.go:474] [remaining 14m30s] Have 4 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:24:00.711148   31689 utils.go:490] [remaining 14m30s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:24:00.711209   31689 utils.go:500] [remaining 14m30s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 4
I0904 11:24:05.701875   31689 utils.go:455] [remaining 14m25s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:24:05.710049   31689 utils.go:474] [remaining 14m25s] Have 3 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:24:05.711779   31689 utils.go:490] [remaining 14m25s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:24:05.711803   31689 utils.go:500] [remaining 14m25s] Expecting at most 2 pods to be scheduled to drained node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4", got 3
I0904 11:24:10.700788   31689 utils.go:455] [remaining 14m20s] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" is marked unschedulable as expected
I0904 11:24:10.706848   31689 utils.go:474] [remaining 14m20s] Have 2 pods scheduled to node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
I0904 11:24:10.708499   31689 utils.go:490] [remaining 14m20s] RC ReadyReplicas: 20, Replicas: 20
I0904 11:24:10.708526   31689 utils.go:504] [remaining 14m20s] Expected result: all but the last one or two pods from the RC got scheduled to a different node while respecting the PDB
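The loop above polls every five seconds: the drained node must stay cordoned, the number of pdb-workload pods on it must fall, and the RC must stay at 20/20 ready so that evictions respect the PodDisruptionBudget; the check passes once at most two pods remain. A minimal bash sketch of the same polling logic, assuming kubectl access (the real check is implemented in Go in utils.go):

NODE="bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4"
while true; do
  # The drained node must remain cordoned for the whole observation window.
  cordoned=$(kubectl get node "${NODE}" -o jsonpath='{.spec.unschedulable}')
  [[ "${cordoned}" == "true" ]] || { echo "node is no longer cordoned"; exit 1; }

  # Pods still scheduled to the drained node.
  pods=$(kubectl get pods -n default -l app=nginx \
    --field-selector spec.nodeName="${NODE}" --no-headers | wc -l)

  # The RC must stay fully ready, i.e. evictions respected the PDB.
  ready=$(kubectl get rc pdb-workload -n default -o jsonpath='{.status.readyReplicas}')

  echo "pods on drained node: ${pods}, RC ready replicas: ${ready}"
  (( pods <= 2 )) && [[ "${ready}" == "20" ]] && break
  sleep 5
done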
STEP: Validating the machine is deleted
E0904 11:24:10.710254   31689 infra.go:454] Machine "machine1" not yet deleted
E0904 11:24:15.712384   31689 infra.go:454] Machine "machine1" not yet deleted
I0904 11:24:20.713577   31689 infra.go:463] Machine "machine1" successfully deleted
STEP: Validate underlying node corresponding to machine1 is removed as well
I0904 11:24:20.715866   31689 utils.go:530] [15m0s remaining] Node "bebdfc33-fa59-4fb2-8ba9-aeb9624e3db4" successfully deleted
STEP: Delete PDB
STEP: Delete machine2
STEP: waiting for cluster to get back to original size. Final size should be 5 nodes
I0904 11:24:20.725769   31689 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0904 11:24:20.730223   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:24:20.730246   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:24:20.730256   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:24:20.730266   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:24:20.741498   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:24:20.741519   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:24:20.741529   31689 utils.go:231] Node "cae9ea51-ebbf-486e-a863-4da5f73358d7". Ready: true. Unschedulable: true
I0904 11:24:20.741537   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:24:20.741545   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:24:20.741552   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:24:20.748603   31689 utils.go:87] Cluster size is 6 nodes
I0904 11:24:25.748841   31689 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0904 11:24:25.754140   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:24:25.754185   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:24:25.754196   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:24:25.754205   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:24:25.758117   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:24:25.758139   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:24:25.758145   31689 utils.go:231] Node "cae9ea51-ebbf-486e-a863-4da5f73358d7". Ready: true. Unschedulable: true
I0904 11:24:25.758151   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:24:25.758156   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:24:25.758189   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:24:25.761548   31689 utils.go:87] Cluster size is 6 nodes
I0904 11:24:30.748785   31689 utils.go:239] [remaining 14m50s] Cluster size expected to be 5 nodes
I0904 11:24:30.753562   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:24:30.753583   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:24:30.753590   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:24:30.753595   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:24:30.756943   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:24:30.756976   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:24:30.756986   31689 utils.go:231] Node "cae9ea51-ebbf-486e-a863-4da5f73358d7". Ready: true. Unschedulable: true
I0904 11:24:30.756995   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:24:30.757004   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:24:30.757016   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:24:30.760620   31689 utils.go:87] Cluster size is 6 nodes
I0904 11:24:35.748792   31689 utils.go:239] [remaining 14m45s] Cluster size expected to be 5 nodes
I0904 11:24:35.752826   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:24:35.752854   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:24:35.752865   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:24:35.752874   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:24:35.756607   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:24:35.756631   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:24:35.756641   31689 utils.go:231] Node "cae9ea51-ebbf-486e-a863-4da5f73358d7". Ready: true. Unschedulable: true
I0904 11:24:35.756649   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:24:35.756657   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:24:35.756665   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:24:35.759792   31689 utils.go:87] Cluster size is 6 nodes
I0904 11:24:40.749123   31689 utils.go:239] [remaining 14m40s] Cluster size expected to be 5 nodes
I0904 11:24:40.754793   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0904 11:24:40.754820   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0904 11:24:40.754830   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0904 11:24:40.754840   31689 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0904 11:24:40.762190   31689 utils.go:231] Node "4169660f-ac78-4b87-b90f-0358fa5efca9". Ready: true. Unschedulable: false
I0904 11:24:40.762218   31689 utils.go:231] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424". Ready: true. Unschedulable: false
I0904 11:24:40.762227   31689 utils.go:231] Node "cbe9202c-babb-4a83-bd70-8f21d75df033". Ready: true. Unschedulable: false
I0904 11:24:40.762236   31689 utils.go:231] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46". Ready: true. Unschedulable: false
I0904 11:24:40.762244   31689 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0904 11:24:40.765326   31689 utils.go:87] Cluster size is 5 nodes
I0904 11:24:40.765347   31689 utils.go:257] waiting for all nodes to be ready
I0904 11:24:40.768429   31689 utils.go:262] waiting for all nodes to be schedulable
I0904 11:24:40.771467   31689 utils.go:290] [remaining 1m0s] Node "4169660f-ac78-4b87-b90f-0358fa5efca9" is schedulable
I0904 11:24:40.771493   31689 utils.go:290] [remaining 1m0s] Node "595f4b23-c3ad-4106-bf5c-21ec235eb424" is schedulable
I0904 11:24:40.771505   31689 utils.go:290] [remaining 1m0s] Node "cbe9202c-babb-4a83-bd70-8f21d75df033" is schedulable
I0904 11:24:40.771515   31689 utils.go:290] [remaining 1m0s] Node "d1f145c4-1ed2-493f-97af-c24f5256cf46" is schedulable
I0904 11:24:40.771525   31689 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0904 11:24:40.771538   31689 utils.go:267] waiting for each node to be backed by a machine
I0904 11:24:40.779298   31689 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0904 11:24:40.779335   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-8drlz" is linked to node "595f4b23-c3ad-4106-bf5c-21ec235eb424"
I0904 11:24:40.779350   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-kx7wv" is linked to node "cbe9202c-babb-4a83-bd70-8f21d75df033"
I0904 11:24:40.779363   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2twbz" is linked to node "4169660f-ac78-4b87-b90f-0358fa5efca9"
I0904 11:24:40.779379   31689 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-kpmfw" is linked to node "d1f145c4-1ed2-493f-97af-c24f5256cf46"
I0904 11:24:40.779392   31689 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
I0904 11:24:40.790403   31689 utils.go:378] [15m0s remaining] Found 0 nodes with the map[node-role.kubernetes.io/worker: node-draining-test:1da1871e-cf06-11e9-95fe-0ac9a22f5366] label, as expected
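After machine2 is deleted the test waits for the cluster to shrink back to five nodes and for every remaining node to be ready, schedulable, and backed by a machine. A rough kubectl sketch of the size and schedulability part of that wait, illustrative only (the actual checks live in utils.go):

EXPECTED=5
until [[ "$(kubectl get nodes --no-headers | wc -l)" -eq "${EXPECTED}" ]]; do
  echo "waiting for the cluster to return to ${EXPECTED} nodes"
  sleep 5
done

# No node may be left cordoned once the cluster is back to its original size.
kubectl get nodes -o custom-columns=NAME:.metadata.name,UNSCHEDULABLE:.spec.unschedulable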

• [SLOW TEST:85.202 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  drain node before removing machine resource
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
------------------------------
[Feature:Machines] Managed cluster should 
  reject invalid machinesets
  /tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:487
I0904 11:24:40.790514   31689 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Creating invalid machineset
STEP: Waiting for ReconcileError MachineSet event
I0904 11:24:40.908036   31689 infra.go:506] Fetching ReconcileError MachineSet invalid-machineset event
I0904 11:24:40.908094   31689 infra.go:512] Found ReconcileError event for "invalid-machineset" machine set with the following message: "invalid-machineset" machineset validation failed: spec.template.metadata.labels: Invalid value: map[string]string{"big-kitty":"i-am-bit-kitty"}: `selector` does not match template `labels`
STEP: Verify no machine from "invalid-machineset" machineset was created
I0904 11:24:40.911099   31689 infra.go:528] Have 0 machines generated from "invalid-machineset" machineset
STEP: Deleting invalid machineset
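The rejection above happens because the machineset's selector does not match its template labels, so validation fails before any machines are created. A hedged illustration of that shape (the test builds the object in Go; the machine.openshift.io/v1beta1 group/version, the namespace, and the selector value shown here are assumptions, and the template spec is truncated):

cat <<'EOF' | kubectl apply -f -
apiVersion: machine.openshift.io/v1beta1   # assumed API group/version
kind: MachineSet
metadata:
  name: invalid-machineset
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      little-kitty: i-am-little-kitty      # selector labels (assumed value)...
  template:
    metadata:
      labels:
        big-kitty: i-am-bit-kitty          # ...do not match the template labels
    spec: {}                               # provider details omitted
EOF

# The controller records a ReconcileError event instead of creating machines.
kubectl get events -n default --field-selector reason=ReconcileError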
•
Ran 7 of 16 Specs in 202.375 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (202.38s)
PASS
ok  	github.com/openshift/cluster-api-actuator-pkg/pkg/e2e	202.422s
make[1]: Leaving directory `/tmp/tmp.sOq0VYkqJs/src/github.com/openshift/cluster-api-actuator-pkg'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 04m 26s] ##########
[PostBuildScript] - Executing post build scripts.
[workspace@2] $ /bin/bash /tmp/jenkins3573783162949132135.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/gathered
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/gathered
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/gathered

0 directories, 0 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins8501752695808421797.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/generated
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
  WARNING: You're not using the default seccomp profile
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/generated
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
└── pid1.journal

0 directories, 8 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins8101141956822573995.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/journals
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/journals
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service

0 directories, 3 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins7415123878710042857.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/builds/276/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r 'artifacts/gathered/*' gcs/artifacts/
cp: cannot stat ‘artifacts/gathered/*’: No such file or directory
++ export status=FAILURE
++ status=FAILURE
+ exit 0
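The cp failure above has two causes visible in this log: the glob is wrapped in single quotes, so the shell passes the literal string artifacts/gathered/* to cp, and the gathered directory is in any case empty (the earlier tree shows 0 files). A small, purely illustrative sketch of a copy that tolerates an empty directory:

shopt -s nullglob dotglob             # let an empty directory expand to nothing
files=(artifacts/gathered/*)          # unquoted glob, expanded by the shell
if (( ${#files[@]} )); then
  cp -r "${files[@]}" gcs/artifacts/
else
  echo "no gathered artifacts to copy"
fi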
[workspace@2] $ /bin/bash /tmp/jenkins1080292076551695019.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.Su74RFtHzJ
+ cat
+ chmod +x /tmp/tmp.Su74RFtHzJ
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.Su74RFtHzJ openshiftdevel:/tmp/tmp.Su74RFtHzJ
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.Su74RFtHzJ"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"1169205390341050368","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}} ]]
++ jq --compact-output '.buildid |= "276"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"276","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}'
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-autoscaler-operator-master-e2e","buildid":"276","prowjobid":"33b339da-cf04-11e9-ab71-0a58ac108d31","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","repo_link":"https://github.com/openshift/cluster-autoscaler-operator","base_ref":"master","base_sha":"5408e9b6aa7c16908e7cdd5dc75d647c449601f3","base_link":"https://github.com/openshift/cluster-autoscaler-operator/commit/5408e9b6aa7c16908e7cdd5dc75d647c449601f3","pulls":[{"number":117,"author":"enxebre","sha":"b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117","commit_link":"https://github.com/openshift/cluster-autoscaler-operator/pull/117/commits/b85c52e35f8ac23efb92c6c7a5d503b05f0f55a3","author_link":"https://github.com/enxebre"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin '/data/gcs/*'
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
a073c86ecf9e: Already exists
cc3fc741b1a9: Already exists
822bed51ba40: Pulling fs layer
85cea451eec0: Pulling fs layer
85cea451eec0: Verifying Checksum
85cea451eec0: Download complete
822bed51ba40: Verifying Checksum
822bed51ba40: Download complete
822bed51ba40: Pull complete
85cea451eec0: Pull complete
Digest: sha256:03aad50d7ec631ee07c12ac2ba679bd48c7781f7d5754f9e0dcc4e7260e35208
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","file":"prow/gcsupload/run.go:107","func":"k8s.io/test-infra/prow/gcsupload.Options.assembleTargets","level":"warning","msg":"Encountered error in resolving items to upload for /data/gcs/*: stat /data/gcs/*: no such file or directory","time":"2019-09-04T11:25:02Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:25:02Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:25:02Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-04T11:25:02Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/276.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:25:03Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-autoscaler-operator/117/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:25:03Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-04T11:25:03Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-04T11:25:03Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
[workspace@2] $ /bin/bash /tmp/jenkins8496912179408766053.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config
+ oct deprovision

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:25:04.587474", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:25:04.591859", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-04 07:25:05.414477", 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:25:06.198072", 
    "msg": "Tags {'Name': 'oct-terminate'} created for resource i-00026d36a3df1ef96."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:25:07.164784", 
    "instance_ids": [
        "i-00026d36a3df1ef96"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-028c56dcaee239954"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0c9838ad75698812b"
                }
            }, 
            "dns_name": "ec2-54-227-18-68.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-00026d36a3df1ef96", 
            "image_id": "ami-0b77b87a37c3e662c", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2019-09-04T11:08:01.000Z", 
            "placement": "us-east-1c", 
            "private_dns_name": "ip-172-18-19-207.ec2.internal", 
            "private_ip": "172.18.19.207", 
            "public_dns_name": "ec2-54-227-18-68.compute-1.amazonaws.com", 
            "public_ip": "54.227.18.68", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "oct-terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}
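The two tasks above first retag the instance for the termination reaper and then terminate it; the attached EBS volumes go with it because delete_on_termination is true in the block device mapping shown. A rough AWS CLI equivalent, for illustration only (oct drives this through Ansible's EC2 modules):

INSTANCE_ID="i-00026d36a3df1ef96"

# Rename the instance so the termination reaper can pick it up.
aws ec2 create-tags --resources "${INSTANCE_ID}" --tags Key=Name,Value=oct-terminate

# Tear the instance down; its EBS volumes are deleted on termination.
aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}"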

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:25:07.406764", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory/host_vars/172.18.19.207.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] ********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-04 07:25:07.913409", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-autoscaler-operator-master-e2e/workspace@2/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
Recording test results
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[WS-CLEANUP] done
Finished: SUCCESS