Started by user OpenShift CI Robot
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[workspace] $ /bin/bash /tmp/jenkins7667265087490605351.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ touch /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ cp /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
+ cat
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 01s] ##########
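(Note: every stage below re-sources the same generated activate helper. From the ++ trace above, that file is roughly equivalent to the sketch below; the actual contents are written by the cat heredoc earlier in this stage and may include more.)

# Sketch of the workspace "activate" helper implied by the xtrace output (assumed contents).
export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
export PATH="${VIRTUAL_ENV}/bin:/sbin:/usr/sbin:/bin:/usr/bin"
unset PYTHON_HOME
export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config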
[workspace] $ /bin/bash /tmp/jenkins2327818763820155387.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ oct provision remote all-in-one --os rhel --stage base --provider aws --discrete-ssh-config --name pull-ci-openshift-kubernetes-autoscaler-master-e2e_41

PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml

PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.669434", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_keypair_name)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.671881", 
    "item": "origin_ci_aws_keypair_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_private_key_path)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.674974", 
    "item": "origin_ci_aws_private_key_path", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.678297", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_ami_tags)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.681368", 
    "item": "origin_ci_aws_ami_tags", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_instance_name)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.684429", 
    "item": "origin_ci_aws_instance_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_instance_type)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.688616", 
    "item": "origin_ci_aws_master_instance_type", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.691654", 
    "item": "origin_ci_aws_identifying_tag_key", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_hostname)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.694693", 
    "item": "origin_ci_aws_hostname", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_ssh_config_strategy)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.698885", 
    "item": "origin_ci_ssh_config_strategy", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_schedulable)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.701939", 
    "item": "openshift_schedulable", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_node_labels)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.705604", 
    "item": "openshift_node_labels", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:27
skipping: [localhost] => (item=origin_ci_aws_master_subnet)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.738830", 
    "item": "origin_ci_aws_master_subnet", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.743544", 
    "item": "origin_ci_aws_etcd_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_node_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.748843", 
    "item": "origin_ci_aws_node_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.753070", 
    "item": "origin_ci_aws_master_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.758470", 
    "item": "origin_ci_aws_master_external_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.762678", 
    "item": "origin_ci_aws_master_internal_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.768056", 
    "item": "origin_ci_aws_router_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:23.772952", 
    "item": "origin_ci_aws_router_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [provision an AWS EC2 instance] *******************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:24.588220", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => {
    "changed": true, 
    "checksum": "18aaee00994df38cc3a63b635893175235331a9c", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/nested_group_mappings", 
    "generated_timestamp": "2019-09-06 11:38:25.052928", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "b30c3226ea63efa3ff9c5e346c14a16e", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 93, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784304.83-239158514357150/source", 
    "state": "file", 
    "uid": 997
}

TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:38:25.216485", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:38:25.377425", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "checksum": "4c06ba508f055c20f13426e8587342e8765a7b66", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml", 
    "generated_timestamp": "2019-09-06 11:38:25.677952", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "8aec71c75f7d512b278ae7c6f2959b12", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 331, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784305.54-247677974123548/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => {
    "changed": true, 
    "cmd": [
        "curl", 
        "-s", 
        "http://instance-data.ec2.internal"
    ], 
    "delta": "0:00:00.015810", 
    "end": "2019-09-06 11:38:25.911369", 
    "failed": false, 
    "failed_when_result": false, 
    "generated_timestamp": "2019-09-06 11:38:25.928994", 
    "rc": 0, 
    "start": "2019-09-06 11:38:25.895559", 
    "stderr": [], 
    "stdout": [
        "1.0", 
        "2007-01-19", 
        "2007-03-01", 
        "2007-08-29", 
        "2007-10-10", 
        "2007-12-15", 
        "2008-02-01", 
        "2008-09-01", 
        "2009-04-04", 
        "2011-01-01", 
        "2011-05-01", 
        "2012-01-12", 
        "2014-02-25", 
        "2014-11-05", 
        "2015-10-20", 
        "2016-04-19", 
        "2016-06-30", 
        "2016-09-02", 
        "2018-03-28", 
        "2018-08-17", 
        "2018-09-24", 
        "latest"
    ], 
    "warnings": [
        "Consider using get_url or uri module rather than running curl"
    ]
}
 [WARNING]: Consider using get_url or uri module rather than running curl

TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_destination_variable": "private_dns_name", 
        "origin_ci_aws_host_address_variable": "private_ip", 
        "origin_ci_aws_vpc_destination_variable": "private_ip_address"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:25.966905"
}

TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:25.999793"
}

TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:38:26.166532", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => {
    "changed": true, 
    "checksum": "625b8af723189db3b96ba0026d0f997a0025bc47", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.py", 
    "generated_timestamp": "2019-09-06 11:38:26.467047", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "cac06c14065dac74904232b89d4ba24c", 
    "mode": "0755", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 63725, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784306.33-167852628649172/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => {
    "changed": true, 
    "checksum": "5699e56212b8cae11b851b720ee8138dd77ac156", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.ini", 
    "generated_timestamp": "2019-09-06 11:38:26.757467", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "3605001aae4c8b7ba156b0aed5298688", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 422, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784306.51-93202188123269/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => {
    "changed": true, 
    "checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/tag_to_group_mappings", 
    "generated_timestamp": "2019-09-06 11:38:27.052485", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "bc3a567a1b6f342e1005182efc1b66be", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 287, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784306.92-183138977059144/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : list available AMIs] ********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:40
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:29.920889", 
    "results": [
        {
            "ami_id": "ami-04f9b88b6b0571f20", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 75, 
                    "snapshot_id": "snap-0655d2d962c590c8c", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 50, 
                    "snapshot_id": "snap-0d86ae865b17f4def", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2018-06-26T12:22:31.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the base stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_base_758", 
            "name": "ami_build_origin_int_rhel_base_758", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_base_758", 
                "image_stage": "base", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }, 
        {
            "ami_id": "ami-0b77b87a37c3e662c", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 75, 
                    "snapshot_id": "snap-02ec23d4818f2747e", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 50, 
                    "snapshot_id": "snap-0d8726e441d4ca329", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2018-06-26T22:18:53.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the base stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_base_760", 
            "name": "ami_build_origin_int_rhel_base_760", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_base_760", 
                "image_stage": "base", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }
    ]
}

TASK [aws-up : choose appropriate AMIs for use] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:49
ok: [localhost] => (item={u'ami_id': u'ami-04f9b88b6b0571f20', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_758', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d86ae865b17f4def', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0655d2d962c590c8c', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_758', u'is_public': False, u'creationDate': u'2018-06-26T12:22:31.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_758'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id_candidate": "ami-04f9b88b6b0571f20"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:29.965285", 
    "item": {
        "ami_id": "ami-04f9b88b6b0571f20", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 75, 
                "snapshot_id": "snap-0655d2d962c590c8c", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 50, 
                "snapshot_id": "snap-0d86ae865b17f4def", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2018-06-26T12:22:31.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the base stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_base_758", 
        "name": "ami_build_origin_int_rhel_base_758", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_base_758", 
            "image_stage": "base", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}
ok: [localhost] => (item={u'ami_id': u'ami-0b77b87a37c3e662c', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_760', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d8726e441d4ca329', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-02ec23d4818f2747e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_760', u'is_public': False, u'creationDate': u'2018-06-26T22:18:53.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_760'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id_candidate": "ami-0b77b87a37c3e662c"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:29.972397", 
    "item": {
        "ami_id": "ami-0b77b87a37c3e662c", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 75, 
                "snapshot_id": "snap-02ec23d4818f2747e", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 50, 
                "snapshot_id": "snap-0d8726e441d4ca329", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2018-06-26T22:18:53.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the base stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_base_760", 
        "name": "ami_build_origin_int_rhel_base_760", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_base_760", 
            "image_stage": "base", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:55
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_ami_id": "ami-0b77b87a37c3e662c"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:30.007695"
}

TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:60
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:30.939940", 
    "subnets": [
        {
            "availability_zone": "us-east-1c", 
            "available_ip_address_count": 4044, 
            "cidr_block": "172.18.16.0/20", 
            "default_for_az": "false", 
            "id": "subnet-8bdb5ac2", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-2", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }, 
        {
            "availability_zone": "us-east-1d", 
            "available_ip_address_count": 4036, 
            "cidr_block": "172.18.0.0/20", 
            "default_for_az": "false", 
            "id": "subnet-cf57c596", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-1", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:67
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_subnet_ids": [
            "subnet-8bdb5ac2", 
            "subnet-cf57c596"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:30.979868"
}

TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:72
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:32.006500", 
    "security_groups": [
        {
            "description": "default VPC security group", 
            "group_id": "sg-7e73221a", 
            "group_name": "default", 
            "ip_permissions": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": [
                        {
                            "group_id": "sg-7e73221a", 
                            "user_id": "531415883065"
                        }
                    ]
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "119.254.120.64/26"
                        }, 
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "209.132.186.34/32"
                        }, 
                        {
                            "cidr_ip": "213.175.37.10/32"
                        }, 
                        {
                            "cidr_ip": "62.40.79.66/32"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }, 
                        {
                            "cidr_ip": "38.140.108.0/24"
                        }, 
                        {
                            "cidr_ip": "213.175.37.9/32"
                        }, 
                        {
                            "cidr_ip": "38.99.12.232/29"
                        }, 
                        {
                            "cidr_ip": "4.14.33.72/30"
                        }, 
                        {
                            "cidr_ip": "4.14.35.88/29"
                        }, 
                        {
                            "cidr_ip": "50.227.40.96/29"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 8444, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 22, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 22, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 80, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "54.241.19.245/32"
                        }, 
                        {
                            "cidr_ip": "97.65.119.184/29"
                        }, 
                        {
                            "cidr_ip": "107.20.219.35/32"
                        }, 
                        {
                            "cidr_ip": "108.166.48.153/32"
                        }, 
                        {
                            "cidr_ip": "212.199.177.64/27"
                        }, 
                        {
                            "cidr_ip": "212.72.208.162/32"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 443, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "udp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 53, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 0, 
                    "ip_protocol": "udp", 
                    "ip_ranges": [], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 65535, 
                    "user_id_group_pairs": [
                        {
                            "group_id": "sg-0d1a5377", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-5875023f", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-7e73221a", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-e1760186", 
                            "user_id": "531415883065"
                        }
                    ]
                }, 
                {
                    "from_port": 3389, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 3389, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": -1, 
                    "ip_protocol": "icmp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": -1, 
                    "user_id_group_pairs": []
                }
            ], 
            "ip_permissions_egress": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": []
                }
            ], 
            "owner_id": "531415883065", 
            "tags": {
                "Name": "devenv-vpc", 
                "openshift_infra": "true", 
                "origin_ci_aws_cluster_component": "master_security_group"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:79
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_security_group_ids": [
            "sg-7e73221a"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:32.046737"
}

TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:84
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:38:54.366939", 
    "instance_ids": [
        "i-03f1b8259c215731f"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-03eae6f84a19d5d4a"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0afc8078905a83f21"
                }
            }, 
            "dns_name": "ec2-18-215-182-23.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-03f1b8259c215731f", 
            "image_id": "ami-0b77b87a37c3e662c", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2019-09-06T15:38:33.000Z", 
            "placement": "us-east-1c", 
            "private_dns_name": "ip-172-18-17-249.ec2.internal", 
            "private_ip": "172.18.17.249", 
            "public_dns_name": "ec2-18-215-182-23.compute-1.amazonaws.com", 
            "public_ip": "18.215.182.23", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "pull-ci-openshift-kubernetes-autoscaler-master-e2e_41", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:110
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_host": "172.18.17.249"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:54.405216"
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:114
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:54.436714", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:119
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_ssh_user": "origin"
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:54.472049"
}

TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:124
changed: [localhost] => {
    "changed": true, 
    "checksum": "ee15cfe13c7ec28adfa3d6e5aa7847760d22905e", 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.17.249.yml", 
    "generated_timestamp": "2019-09-06 11:38:54.782794", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "f9766bdb7fd91c9ac7ba7b091daf5e99", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 769, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567784334.64-149964845028324/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:141
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_ssh_config_files": [
            "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:54.820913"
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:146
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:38:54.852722", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:151
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "generated_timestamp": "2019-09-06 11:38:55.019813", 
    "gid": 995, 
    "group": "jenkins", 
    "item": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:157
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:38:55.313696", 
    "item": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "msg": "Block inserted"
}

TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:175
ok: [localhost] => {
    "changed": false, 
    "elapsed": 61, 
    "generated_timestamp": "2019-09-06 11:39:56.693598", 
    "path": null, 
    "port": 22, 
    "search_regex": null, 
    "state": "started"
}

PLAY RECAP *********************************************************************
localhost                  : ok=28   changed=13   unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 01m 34s] ##########
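(Note: the "update the SSH configuration" task above reports only "Block inserted". Given the facts gathered earlier in the play (host address 172.18.17.249, SSH user origin, and the libra keypair whose private key lives at /var/lib/jenkins/.ssh/devenv.pem), the inserted block is presumably along the lines of the sketch below; the exact directives and the openshiftdevel alias are assumptions, as the log does not show the file contents.)

# Hypothetical reconstruction of the block appended to the inventory .ssh_config.
cat >> /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config <<'EOF'
Host openshiftdevel
    HostName 172.18.17.249
    User origin
    IdentityFile /var/lib/jenkins/.ssh/devenv.pem
EOF

Every ssh -F and scp -F call in the stages below resolves the openshiftdevel alias through this file.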
[workspace] $ /bin/bash /tmp/jenkins6734933503586008665.sh
########## STARTING STAGE: FORWARD GCS CREDENTIALS TO REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ (( i = 0 ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ break
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD GCS CREDENTIALS TO REMOTE HOST [00h 00m 02s] ##########
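(Note: the (( i = 0 )), (( i < 10 )) and break lines above are the xtrace of the stage's retry loop around scp. A minimal sketch of that pattern follows; the retry delay is assumed, since it does not appear in the trace.)

# Retry the credential copy up to 10 times and stop on the first success.
for (( i = 0; i < 10; i++ )); do
    if scp -F "${OCT_CONFIG_HOME}/origin-ci-tool/inventory/.ssh_config" \
            /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json \
            openshiftdevel:/data/credentials.json; then
        break
    fi
    sleep 10  # assumed back-off between attempts, not visible in the trace
done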
[workspace] $ /bin/bash /tmp/jenkins6233007345212840119.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"1169998239923965952","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''buildId='\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_ID=1169998239923965952'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_OWNER=openshift'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_NAME=kubernetes-autoscaler'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_REF=master'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_SHA=18a08df116691fed1236f4e53a67614dbc85b1fb'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_REFS=master:18a08df116691fed1236f4e53a67614dbc85b1fb,116:470bd635e18fe2399da3e23cf71c3d649266c164'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_NUMBER=116'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_PULL_SHA=470bd635e18fe2399da3e23cf71c3d649266c164'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"1169998239923965952","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=41'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''CLONEREFS_ARGS='\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 05s] ##########
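(Note: each forwarded value is appended as a KEY=VALUE line to /etc/environment on the instance, so it is available to later login sessions on the host. A purely illustrative check that the values landed:)

ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config \
    openshiftdevel 'grep -E "^(BUILD_ID|PULL_NUMBER|PULL_REFS)=" /etc/environment'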
[workspace] $ /bin/bash /tmp/jenkins3167889976586139652.sh
########## STARTING STAGE: SYNC REPOSITORIES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.HKdN9uRo7m
+ cat
+ chmod +x /tmp/tmp.HKdN9uRo7m
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.HKdN9uRo7m openshiftdevel:/tmp/tmp.HKdN9uRo7m
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.HKdN9uRo7m"'
+ cd /home/origin
++ jq --compact-output '.buildid |= "41"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"41","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}'
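(Note: the ++ jq line and the JOB_SPEC assignment above show the remote script rewriting the buildid field of the forwarded JOB_SPEC to the short Jenkins build number. In shell terms this is roughly the following, with the plumbing of the original value assumed.)

# Rewrite .buildid in the JOB_SPEC JSON to the Jenkins build number (41).
JOB_SPEC="$(jq --compact-output '.buildid |= "41"' <<< "${JOB_SPEC}")"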
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs
1160f4abea84: Pulling fs layer
be60dbe7622d: Pulling fs layer
d26b76701841: Pulling fs layer
1b90cab916ea: Pulling fs layer
3a00cbb24bdb: Pulling fs layer
1b90cab916ea: Waiting
3a00cbb24bdb: Waiting
be60dbe7622d: Verifying Checksum
be60dbe7622d: Download complete
1160f4abea84: Download complete
d26b76701841: Verifying Checksum
d26b76701841: Download complete
3a00cbb24bdb: Verifying Checksum
3a00cbb24bdb: Download complete
1b90cab916ea: Verifying Checksum
1b90cab916ea: Download complete
1160f4abea84: Pull complete
be60dbe7622d: Pull complete
d26b76701841: Pull complete
1b90cab916ea: Pull complete
3a00cbb24bdb: Pull complete
Digest: sha256:d68e1c6c2de5c1167a79b24d5ba4f909349ca7a44fb634e214bdadc2c8b010cd
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest
+ break
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
8f72556ef119: Pulling fs layer
8e5b170ec95b: Pulling fs layer
cc3fc741b1a9: Download complete
a073c86ecf9e: Verifying Checksum
a073c86ecf9e: Download complete
8f72556ef119: Verifying Checksum
8f72556ef119: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
a073c86ecf9e: Pull complete
cc3fc741b1a9: Pull complete
8f72556ef119: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:e651a6455ada7c070c439eddcd753e2e2ac1fb934c4f2a526c37a4674c8eaee4
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
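Each of the two image pulls above runs inside a bounded retry loop: up to five docker pull attempts per image, breaking out of the loop on the first success. A hedged reconstruction of that loop (the image names and the five-attempt bound come from the trace; any back-off between attempts is not shown there and is omitted here):

    # Sketch: retry each pull up to five times, stopping early on success.
    for image in 'registry.svc.ci.openshift.org/ci/clonerefs:latest' \
                 'registry.svc.ci.openshift.org/ci/initupload:latest'; do
      for (( i = 0; i < 5; i++ )); do
        if docker pull "${image}"; then
          break
        fi
      done
    done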
+ clonerefs_args='--repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,cluster-api-actuator-pkg=master '
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,kubernetes-autoscaler=master:18a08df116691fed1236f4e53a67614dbc85b1fb,116:470bd635e18fe2399da3e23cf71c3d649266c164 --repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,cluster-api-actuator-pkg=master
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"","sha":"470bd635e18fe2399da3e23cf71c3d649266c164"}]},"time":"2019-09-06T15:40:53Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-actuator-pkg","base_ref":"master"},"time":"2019-09-06T15:40:53Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","base_ref":"master"},"time":"2019-09-06T15:40:53Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"machine-api-operator","base_ref":"master"},"time":"2019-09-06T15:40:53Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","base_ref":"master"},"time":"2019-09-06T15:40:53Z"}
{"command":"mkdir -p /data/src/github.com/openshift/kubernetes-autoscaler","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-actuator-pkg","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/kubernetes-autoscaler/.git/\n","time":"2019-09-06T15:40:53Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"mkdir -p /data/src/github.com/openshift/machine-api-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-autoscaler-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-actuator-pkg/.git/\n","time":"2019-09-06T15:40:53Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-kubemark/.git/\n","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/machine-api-operator/.git/\n","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-autoscaler-operator/.git/\n","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:53Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch            HEAD       -\u003e FETCH_HEAD\n * [new tag]         v0.1.0     -\u003e v0.1.0\n * [new tag]         v0.2.0     -\u003e v0.2.0\n","time":"2019-09-06T15:40:55Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch            master     -\u003e FETCH_HEAD\n","time":"2019-09-06T15:40:56Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 474e14e... Merge pull request #391 from mgugino-upstream-stage/related-resources\n","time":"2019-09-06T15:40:57Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:57Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch              HEAD       -\u003e FETCH_HEAD\n * [new tag]           v0.0.0     -\u003e v0.0.0\n","time":"2019-09-06T15:40:57Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T15:40:57Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch              master     -\u003e FETCH_HEAD\n","time":"2019-09-06T15:40:57Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:58Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 045aea45... Merge pull request #117 from enxebre/more-related-objects\n","time":"2019-09-06T15:40:58Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:58Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T15:40:58Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch              HEAD       -\u003e FETCH_HEAD\n * [new tag]           v1.0       -\u003e v1.0\n","time":"2019-09-06T15:40:59Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:40:59Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch              master     -\u003e FETCH_HEAD\n","time":"2019-09-06T15:40:59Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-actuator-pkg.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-actuator-pkg\n * branch              HEAD       -\u003e FETCH_HEAD\n","time":"2019-09-06T15:40:59Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-actuator-pkg.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-actuator-pkg\n * branch              master     -\u003e FETCH_HEAD\n","time":"2019-09-06T15:41:00Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 8250b456... Merge pull request #114 from frobware/better-teardown-in-e2e-autoscaler\n","time":"2019-09-06T15:41:00Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:00Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 45659b31... Merge pull request #27 from frobware/bump-openshift-cluster-api-deps\n","time":"2019-09-06T15:41:00Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:00Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T15:41:00Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:01Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T15:41:01Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:01Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch                HEAD                        -\u003e FETCH_HEAD\n * [new tag]             addon-resizer-1.8.0         -\u003e addon-resizer-1.8.0\n * [new tag]             addon-resizer-1.8.1         -\u003e addon-resizer-1.8.1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.37.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.38.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.39.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.40.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.41.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.42.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.43.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.44.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.46.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.47.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.50.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.51.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.52.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.53.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.53.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.54.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.54.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.56.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.57.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.58.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.60.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.61.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.61.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.63.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.64.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.64.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.10.0-0.65.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.65.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.66.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.67.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.67.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.68.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.68.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.69.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.0-1666 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-1666\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.1-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.1-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.10-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.10-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.11-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.12-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.13-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.14-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.15-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.16-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.17-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.18-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.2-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.2-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.21-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.22-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.23-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.3-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.5-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.6-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.7-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.8-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.10.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.9-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.10.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.11.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.11.0\n * [new 
tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.13.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.14.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.15.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.16.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.16.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.17.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.18.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.19.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.20.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.21.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.22.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.23.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.24.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.25.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.26.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.27.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.28.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.30.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.32.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.5.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.7.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.8.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.9.0\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.100-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.100-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.104-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.104-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.105-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.105-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.106-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.106-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.107-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.107-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.108-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.108-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.109-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.109-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.11-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.110-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.110-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.111-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.111-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.112-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.112-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.113-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.113-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.114-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.114-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.115-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.115-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.116-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.116-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.117-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.117-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.119-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.119-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.12-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.121-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.121-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.122-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.122-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.123-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.123-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.124-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.124-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.125-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.125-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.126-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.126-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.127-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.127-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.128-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.128-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.129-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.129-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.13-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.130-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.130-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.131-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.131-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.132-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.132-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.133-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.133-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.134-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.134-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.135-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.135-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.136-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.136-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.137-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.137-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.138-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.138-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.139-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.139-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.14-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.140-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.140-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.141-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.141-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.142-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.142-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.143-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.143-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.15-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.16-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.17-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.18-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.19-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.19-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.20-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.20-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.21-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.22-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.23-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.24-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.24-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.25-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.25-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.26-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.26-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.27-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.27-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.28-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.28-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.29-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.29-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.3-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.30-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.30-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.31-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.31-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.32-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.32-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.33-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.33-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.34-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.34-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.35-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.35-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.36-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.36-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.37-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.37-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.38-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.38-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.39-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.39-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.4-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.4-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.40-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.40-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.41-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.41-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.42-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.42-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.43-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.43-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.44-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.44-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.45-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.45-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.46-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.46-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.47-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.47-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.49-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.49-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.5-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.50-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.50-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.51-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.51-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.53-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.53-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.54-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.54-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.55-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.55-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.56-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.56-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.57-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.57-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.58-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.58-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.59-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.59-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.6-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.60-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.60-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.61-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.61-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.62-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.62-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.63-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.63-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.64-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.64-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.65-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.65-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.66-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.66-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.67-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.67-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.69-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.69-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.7-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.71-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.71-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.72-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.72-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.73-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.73-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.74-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.74-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.75-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.75-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.76-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.76-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.77-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.77-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.78-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.78-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.79-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.79-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.8-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.81-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.81-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.82-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.82-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.83-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.83-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.85-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.85-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.86-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.86-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.87-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.87-1\n * [new tag]             
atomic-openshift-cluster-autoscaler-3.11.88-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.88-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.9-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.90-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.90-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.91-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.91-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.92-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.92-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.93-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.93-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.94-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.94-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.95-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.95-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.96-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.96-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.97-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.97-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.98-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.98-1\n * [new tag]             atomic-openshift-cluster-autoscaler-3.11.99-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.99-1\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.10.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.100.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.100.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.101.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.101.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.102.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.102.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.103.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.103.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.104.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.104.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.105.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.105.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.106.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.106.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.107.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.107.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.109.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.109.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.110.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.110.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.112.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.112.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.114.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.114.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.115.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.115.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.116.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.116.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-4.0.0-0.117.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.117.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.118.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.118.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.119.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.119.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.12.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.12.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.122.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.122.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.123.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.123.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.124.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.124.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.125.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.125.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.128.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.128.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.13.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.130.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.130.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.131.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.131.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.132.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.132.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.136.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.136.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.137.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.137.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.138.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.138.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.139.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.139.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.14.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.140.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.140.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.141.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.141.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.142.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.142.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.143.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.143.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.144.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.144.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.145.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.145.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.146.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.146.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.147.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.147.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.148.0 -\u003e 
atomic-openshift-cluster-autoscaler-4.0.0-0.148.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.149.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.149.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.15.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.17.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.18.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.19.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.20.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.21.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.22.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.23.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.24.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.25.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.26.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.27.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.28.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.29.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.29.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.30.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.31.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.31.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.32.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.33.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.33.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.36.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.36.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.37.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.38.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.39.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.4.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.4.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.40.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.41.0\n * [new tag]             
atomic-openshift-cluster-autoscaler-4.0.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.42.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.43.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.44.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.45.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.45.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.46.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.47.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.48.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.48.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.49.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.49.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.5.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.50.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.51.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.52.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.55.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.55.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.56.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.57.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.58.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.59.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.59.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.6.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.6.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.60.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.62.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.62.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.63.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.66.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.69.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.7.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.70.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.70.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.72.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.72.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.74.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.74.0\n * [new tag]          
   atomic-openshift-cluster-autoscaler-4.0.0-0.75.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.75.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.76.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.76.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.77.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.77.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.79.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.79.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.8.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.80.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.80.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.81.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.81.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.82.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.82.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.83.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.83.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.84.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.84.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.85.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.85.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.87.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.87.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.88.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.88.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.89.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.89.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.9.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.91.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.91.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.92.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.92.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.93.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.93.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.94.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.94.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.95.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.95.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.96.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.96.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.97.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.97.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.98.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.98.0\n * [new tag]             atomic-openshift-cluster-autoscaler-4.0.0-0.99.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.99.0\n * [new tag]             cluster-autoscaler-0.5.2    -\u003e cluster-autoscaler-0.5.2\n * [new tag]             cluster-autoscaler-0.5.3    -\u003e cluster-autoscaler-0.5.3\n * [new tag]             cluster-autoscaler-0.5.4    -\u003e cluster-autoscaler-0.5.4\n * [new tag]             cluster-autoscaler-0.6.0    -\u003e cluster-autoscaler-0.6.0\n * [new tag]             cluster-autoscaler-0.6.1    -\u003e 
cluster-autoscaler-0.6.1\n * [new tag]             cluster-autoscaler-0.6.2    -\u003e cluster-autoscaler-0.6.2\n * [new tag]             cluster-autoscaler-0.6.3    -\u003e cluster-autoscaler-0.6.3\n * [new tag]             cluster-autoscaler-0.6.4    -\u003e cluster-autoscaler-0.6.4\n * [new tag]             cluster-autoscaler-1.0.0    -\u003e cluster-autoscaler-1.0.0\n * [new tag]             cluster-autoscaler-1.0.1    -\u003e cluster-autoscaler-1.0.1\n * [new tag]             cluster-autoscaler-1.0.2    -\u003e cluster-autoscaler-1.0.2\n * [new tag]             cluster-autoscaler-1.0.3    -\u003e cluster-autoscaler-1.0.3\n * [new tag]             cluster-autoscaler-1.0.4    -\u003e cluster-autoscaler-1.0.4\n * [new tag]             cluster-autoscaler-1.0.5    -\u003e cluster-autoscaler-1.0.5\n * [new tag]             cluster-autoscaler-1.1.0    -\u003e cluster-autoscaler-1.1.0\n * [new tag]             cluster-autoscaler-1.1.1    -\u003e cluster-autoscaler-1.1.1\n * [new tag]             cluster-autoscaler-1.1.2    -\u003e cluster-autoscaler-1.1.2\n * [new tag]             cluster-autoscaler-1.2.0    -\u003e cluster-autoscaler-1.2.0\n * [new tag]             cluster-autoscaler-1.2.1    -\u003e cluster-autoscaler-1.2.1\n * [new tag]             cluster-autoscaler-1.2.2    -\u003e cluster-autoscaler-1.2.2\n * [new tag]             v3.10.0                     -\u003e v3.10.0\n * [new tag]             v3.10.0-alpha.0             -\u003e v3.10.0-alpha.0\n * [new tag]             v3.10.0-rc.0                -\u003e v3.10.0-rc.0\n * [new tag]             v3.11                       -\u003e v3.11\n * [new tag]             v3.11.0                     -\u003e v3.11.0\n * [new tag]             v3.11.0-alpha.0             -\u003e v3.11.0-alpha.0\n * [new tag]             vertical-pod-autoscaler-0.1 -\u003e vertical-pod-autoscaler-0.1\n","time":"2019-09-06T15:41:03Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch                master     -\u003e FETCH_HEAD\n","time":"2019-09-06T15:41:03Z"}
{"command":"git checkout 18a08df116691fed1236f4e53a67614dbc85b1fb","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out '18a08df116691fed1236f4e53a67614dbc85b1fb'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n  git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 18a08df11... Merge pull request #114 from ingvagabund/goimports-makefile\n","time":"2019-09-06T15:41:06Z"}
{"command":"git branch --force master 18a08df116691fed1236f4e53a67614dbc85b1fb","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:06Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T15:41:06Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git pull/116/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch                refs/pull/116/head -\u003e FETCH_HEAD\n","time":"2019-09-06T15:41:06Z"}
{"command":"git merge --no-ff 470bd635e18fe2399da3e23cf71c3d649266c164","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Removing cluster-autoscaler/processors/nodegroupset/openshiftmachineapi_compare_nodegroups.go\nMerge made by the 'recursive' strategy.\n cluster-autoscaler/main.go                         |  9 ---\n .../processors/nodegroupset/compare_nodegroups.go  | 27 +++++--\n .../nodegroupset/compare_nodegroups_test.go        | 14 ++++\n .../openshiftmachineapi_compare_nodegroups.go      | 90 ----------------------\n 4 files changed, 36 insertions(+), 104 deletions(-)\n delete mode 100644 cluster-autoscaler/processors/nodegroupset/openshiftmachineapi_compare_nodegroups.go\n","time":"2019-09-06T15:41:07Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T15:41:07Z"}
{"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-09-06T15:41:07Z"}
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"41","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/started.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:41:10Z"}
{"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-06T15:41:10Z"}
+ sudo chmod -R a+rwX /data
+ sudo chown -R origin:origin-git /data
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 10s] ##########
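To replay the ref assembly recorded by clonerefs above outside of Prow, a minimal sketch using the SHAs and pull request number from the clone log (run from any scratch directory) would be:

  # clone the repository and recreate the merge the job tested
  git clone https://github.com/openshift/kubernetes-autoscaler.git
  cd kubernetes-autoscaler
  git checkout 18a08df116691fed1236f4e53a67614dbc85b1fb            # base_sha of master from the job spec
  git branch --force master 18a08df116691fed1236f4e53a67614dbc85b1fb
  git checkout master
  git fetch origin pull/116/head                                   # head of PR #116
  git merge --no-ff 470bd635e18fe2399da3e23cf71c3d649266c164
  git submodule update --init --recursive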
[workspace] $ /bin/bash /tmp/jenkins3133583284564825469.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=pull-ci-openshift-kubernetes-autoscaler-master-e2e'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=41'\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ##########
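Variables appended to /etc/environment are picked up (via pam_env) by later ssh sessions, which is how the subsequent bash -l wrappers see JOB_NAME and BUILD_NUMBER. A quick check, assuming the same ssh config file used above, might be:

  ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config \
    openshiftdevel 'bash -l -c "echo $JOB_NAME $BUILD_NUMBER"'
  # expected output: pull-ci-openshift-kubernetes-autoscaler-master-e2e 41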
[workspace] $ /bin/bash /tmp/jenkins4803858500041751747.sh
########## STARTING STAGE: INSTALL MINIKUBE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.CHOaePyLvL
+ cat
+ chmod +x /tmp/tmp.CHOaePyLvL
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.CHOaePyLvL openshiftdevel:/tmp/tmp.CHOaePyLvL
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.CHOaePyLvL"'
+ cd /home/origin
+ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 40.3M  100 40.3M    0     0  33.6M      0  0:00:01  0:00:01 --:--:-- 33.6M
+ chmod +x minikube
+ sudo mv minikube /usr/bin/
+ curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
  6 54.6M    6 3502k    0     0  12.7M      0  0:00:04 --:--:--  0:00:04 12.7M
100 54.6M  100 54.6M    0     0  70.0M      0 --:--:-- --:--:-- --:--:-- 70.0M
+ chmod +x kubectl
+ sudo mv kubectl /usr/bin/
+ sudo yum install -y ebtables
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Resolving Dependencies
--> Running transaction check
---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package     Arch      Version           Repository                        Size
================================================================================
Installing:
 ebtables    x86_64    2.0.10-16.el7     oso-rhui-rhel-server-releases    123 k

Transaction Summary
================================================================================
Install  1 Package

Total download size: 123 k
Installed size: 343 k
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : ebtables-2.0.10-16.el7.x86_64                                1/1 
  Verifying  : ebtables-2.0.10-16.el7.x86_64                                1/1 

Installed:
  ebtables.x86_64 0:2.0.10-16.el7                                               

Complete!
+ VERSION=v1.13.0
+ wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
--2019-09-06 15:42:41--  https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
Resolving github.com (github.com)... 140.82.114.4
Connecting to github.com (github.com)|140.82.114.4|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190906%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190906T154241Z&X-Amz-Expires=300&X-Amz-Signature=8af2ddd8beee2ed7acebd5050a6ede4cef1afee0894db0c0e1859d24a23d28dc&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream [following]
--2019-09-06 15:42:41--  https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190906%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190906T154241Z&X-Amz-Expires=300&X-Amz-Signature=8af2ddd8beee2ed7acebd5050a6ede4cef1afee0894db0c0e1859d24a23d28dc&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.216.170.11
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.216.170.11|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10631149 (10M) [application/octet-stream]
Saving to: ‘crictl-v1.13.0-linux-amd64.tar.gz’

  [wget progress-meter output omitted: 0K .. 10350K, 100%]

2019-09-06 15:42:42 (91.0 MB/s) - ‘crictl-v1.13.0-linux-amd64.tar.gz’ saved [10631149/10631149]

+ sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin
crictl
+ rm -f crictl-v1.13.0-linux-amd64.tar.gz
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 01m 28s] ##########
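A quick sanity check of what this stage leaves in /usr/bin, assuming the versions pinned above, could be:

  minikube version            # expect v0.30.0
  kubectl version --client    # expect v1.12.0
  crictl --version            # expect v1.13.0
  rpm -q ebtables             # installed via yum above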
[workspace] $ /bin/bash /tmp/jenkins4343113609592262338.sh
########## STARTING STAGE: DEPLOY KUBERNETES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.7wE4hqneWZ
+ cat
+ chmod +x /tmp/tmp.7wE4hqneWZ
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.7wE4hqneWZ openshiftdevel:/tmp/tmp.7wE4hqneWZ
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.7wE4hqneWZ"'
+ cd /home/origin
+ sudo setenforce 0
+ sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5
There is a newer version of minikube available (v1.3.1).  Download it here:
https://github.com/kubernetes/minikube/releases/tag/v1.3.1

To disable this notification, run the following:
minikube config set WantUpdateNotification false
Starting local Kubernetes v1.12.0 cluster...
Starting VM...
Creating CA: /root/.minikube/certs/ca.pem
Creating client certificate: /root/.minikube/certs/cert.pem
Getting VM IP address...
Moving files into cluster...
Downloading kubeadm v1.12.0
Downloading kubelet v1.12.0
Finished Downloading kubeadm v1.12.0
Finished Downloading kubelet v1.12.0
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
	The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks

When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions.  An example of this is below:

	sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
	sudo chown -R $USER $HOME/.kube
	sudo chgrp -R $USER $HOME/.kube

	sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
	sudo chown -R $USER $HOME/.minikube
	sudo chgrp -R $USER $HOME/.minikube

This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true
Loading cached images from config file.
+ sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 01m 01s] ##########
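Because the 'none' driver leaves the kubeconfig and certificates root-owned under /root (as the warning above explains), a minimal readiness check that avoids relocating those files is to point kubectl at root's config directly:

  sudo kubectl --kubeconfig=/root/.kube/config get nodes              # the single node should report Ready
  sudo kubectl --kubeconfig=/root/.kube/config get pods -n kube-system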
[workspace] $ /bin/bash /tmp/jenkins2599306097840057559.sh
########## STARTING STAGE: INSTALL KUSTOMIZE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.nIy5P0OAst
+ cat
+ chmod +x /tmp/tmp.nIy5P0OAst
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.nIy5P0OAst openshiftdevel:/tmp/tmp.nIy5P0OAst
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.nIy5P0OAst"'
+ cd /home/origin
+ curl -Lo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.1.0/kustomize_2.1.0_linux_amd64
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   618    0   618    0     0   1781      0 --:--:-- --:--:-- --:--:--  1786

100 22.9M  100 22.9M    0     0  35.6M      0 --:--:-- --:--:-- --:--:-- 35.6M
+ chmod u+x kustomize
+ sudo mv kustomize /usr/bin/kustomize
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL KUSTOMIZE [00h 00m 01s] ##########
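The kustomize binary installed here is what the deploy targets later in the job pipe into kubectl; a small usage sketch (the directory name is illustrative) is:

  kustomize version
  # render a kustomization directory and apply the result, the same
  # pattern make deploy-kubemark uses below
  kustomize build ./config | kubectl apply -f -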
[workspace] $ /bin/bash /tmp/jenkins8167746624034979052.sh
########## STARTING STAGE: INSTALL IMAGEBUILDER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.K7i6NhcZMC
+ cat
+ chmod +x /tmp/tmp.K7i6NhcZMC
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.K7i6NhcZMC openshiftdevel:/tmp/tmp.K7i6NhcZMC
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.K7i6NhcZMC"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL IMAGEBUILDER [00h 00m 25s] ##########
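imagebuilder executes a Dockerfile against the local Docker daemon and is used by the image builds below in place of docker build; a minimal invocation sketch (image name and Dockerfile path are illustrative) is:

  # single tag, explicit Dockerfile
  imagebuilder -f ./Dockerfile -t example/image:dev .
  # several tags can be applied in one pass
  imagebuilder -t example/image:dev -t example/image:latest .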
[workspace] $ /bin/bash /tmp/jenkins8233892439018510150.sh
########## STARTING STAGE: BUILD KUBEMARK MACHINE CONTROLLERS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.gZh02zoD8C
+ cat
+ chmod +x /tmp/tmp.gZh02zoD8C
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.gZh02zoD8C openshiftdevel:/tmp/tmp.gZh02zoD8C
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.gZh02zoD8C"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo make images IMAGE=docker.io/gofed/kubemark-machine-controllers VERSION=v1.0 NO_DOCKER=1
imagebuilder -t "docker.io/gofed/kubemark-machine-controllers:v1.0" -t "docker.io/gofed/kubemark-machine-controllers:latest" ./
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.10 was not found, pulling ...
--> Pulled 0/2 layers, 21% complete
--> Pulled 1/2 layers, 76% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark
--> COPY . .
--> RUN go build -o ./machine-controller-manager ./cmd/manager
--> RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager
--> Image docker.io/gofed/base:baseci was not found, pulling ...
--> Pulled 1/2 layers, 74% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM docker.io/gofed/base:baseci as 1
--> RUN INSTALL_PKGS="       openssh       " &&     yum install -y $INSTALL_PKGS &&     rpm -V $INSTALL_PKGS &&     yum clean all &&     curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl &&     chmod +x ./kubectl &&     mv ./kubectl /bin/kubectl &&     curl -LO https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 &&     chmod +x ./jq-linux64 &&     mv ./jq-linux64 /bin/jq
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
 * base: mirror.ash.fastserv.com
 * extras: mirror.ash.fastserv.com
 * updates: mirror.ash.fastserv.com
Resolving Dependencies
--> Running transaction check
---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed
--> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64
--> Running transaction check
---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed
--> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64
--> Running transaction check
---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package               Arch           Version                Repository    Size
================================================================================
Installing:
 openssh               x86_64         7.4p1-16.el7           base         510 k
Installing for dependencies:
 fipscheck             x86_64         1.4.1-6.el7            base          21 k
 fipscheck-lib         x86_64         1.4.1-6.el7            base          11 k

Transaction Summary
================================================================================
Install  1 Package (+2 Dependent packages)

Total download size: 542 k
Installed size: 2.0 M
Downloading packages:
--------------------------------------------------------------------------------
Total                                              4.2 MB/s | 542 kB  00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : fipscheck-1.4.1-6.el7.x86_64                                 1/3 
  Installing : fipscheck-lib-1.4.1-6.el7.x86_64                             2/3 
  Installing : openssh-7.4p1-16.el7.x86_64                                  3/3 
  Verifying  : fipscheck-lib-1.4.1-6.el7.x86_64                             1/3 
  Verifying  : fipscheck-1.4.1-6.el7.x86_64                                 2/3 
  Verifying  : openssh-7.4p1-16.el7.x86_64                                  3/3 

Installed:
  openssh.x86_64 0:7.4p1-16.el7                                                 

Dependency Installed:
  fipscheck.x86_64 0:1.4.1-6.el7       fipscheck-lib.x86_64 0:1.4.1-6.el7      

Complete!
Loaded plugins: fastestmirror, ovl
Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates
Cleaning up list of fastest mirrors
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 40.9M  100 40.9M    0     0  65.1M      0 --:--:-- --:--:-- --:--:-- 65.1M
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed

  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100   599    0   599    0     0   2598      0 --:--:-- --:--:-- --:--:--  2604

  0 2956k    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
100 2956k  100 2956k    0     0  8318k      0 --:--:-- --:--:-- --:--:-- 55.5M
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager /
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager /
--> Committing changes to docker.io/gofed/kubemark-machine-controllers:v1.0 ...
--> Tagged as docker.io/gofed/kubemark-machine-controllers:latest
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD KUBEMARK MACHINE CONTROLLERS [00h 01m 33s] ##########
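The make target above packages two binaries, /manager and /machine-controller-manager, into a single image; a quick check that both tags landed in the local daemon (names taken from the output above) could be:

  docker images | grep kubemark-machine-controllers   # expect the v1.0 and latest tags
  docker run --rm docker.io/gofed/kubemark-machine-controllers:v1.0 ls -l /manager /machine-controller-manager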
[workspace] $ /bin/bash /tmp/jenkins2388277926782863230.sh
########## STARTING STAGE: BUILD CLUSTER AUTOSCALER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.YqwV4OAup3
+ cat
+ chmod +x /tmp/tmp.YqwV4OAup3
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.YqwV4OAup3 openshiftdevel:/tmp/tmp.YqwV4OAup3
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.YqwV4OAup3"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/kubernetes-autoscaler
+ sudo imagebuilder -f images/cluster-autoscaler/Dockerfile -t docker.io/openshift/origin-cluster-autoscaler:v4.0 .
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ...
--> Pulled 1/2 layers, 65% complete
--> Pulled 2/2 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/k8s.io/autoscaler
--> COPY . .
--> RUN go build -o cluster-autoscaler/cluster-autoscaler ./cluster-autoscaler
--> Image registry.svc.ci.openshift.org/openshift/origin-v4.0:base was not found, pulling ...
--> Pulled 2/4 layers, 50% complete
--> Pulled 3/4 layers, 76% complete
--> Pulled 4/4 layers, 100% complete
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /usr/bin/
--> CMD /usr/bin/cluster-autoscaler
--> LABEL summary="Cluster Autoscaler for OpenShift and Kubernetes"
--> Committing changes to docker.io/openshift/origin-cluster-autoscaler:v4.0 ...
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD CLUSTER AUTOSCALER [00h 02m 24s] ##########
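This image is presumably what the cluster-autoscaler operator deployed in the later stages runs; a sketch to confirm the binary was copied in as the final COPY above indicates (tag taken from the build command) is:

  docker images docker.io/openshift/origin-cluster-autoscaler    # expect the v4.0 tag
  docker run --rm docker.io/openshift/origin-cluster-autoscaler:v4.0 ls -l /usr/bin/cluster-autoscaler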
[workspace] $ /bin/bash /tmp/jenkins1762679945075673576.sh
########## STARTING STAGE: DEPLOY MACHINE API OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.hkH4sFrkkh
+ cat
+ chmod +x /tmp/tmp.hkH4sFrkkh
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.hkH4sFrkkh openshiftdevel:/tmp/tmp.hkH4sFrkkh
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.hkH4sFrkkh"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/machine-api-operator
+ sudo imagebuilder -t docker.io/openshift/origin-machine-api-operator:v4.0.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/machine-api-operator
--> COPY . .
--> RUN NO_DOCKER=1 make build
./hack/go-build.sh machine-api-operator
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-524-g474e14e4)
./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-524-g474e14e4)
./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-524-g474e14e4)
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-healthcheck .
--> LABEL io.openshift.release.operator true
--> Committing changes to docker.io/openshift/origin-machine-api-operator:v4.0.0 ...
--> Done
+ sudo make deploy-kubemark
kustomize build config | kubectl apply -f -
namespace/kubemark-actuator created
serviceaccount/kubemark created
clusterrole.rbac.authorization.k8s.io/kubemark-actuator-role created
clusterrolebinding.rbac.authorization.k8s.io/kubemark-actuator-rolebinding created
configmap/deleteunreadynodes created
deployment.apps/machineapi-kubemark-controllers created
kustomize build | kubectl apply -f -
namespace/openshift-machine-api created
customresourcedefinition.apiextensions.k8s.io/clusteroperators.config.openshift.io created
customresourcedefinition.apiextensions.k8s.io/featuregates.config.openshift.io created
customresourcedefinition.apiextensions.k8s.io/machinedisruptionbudgets.healthchecking.openshift.io created
customresourcedefinition.apiextensions.k8s.io/machinehealthchecks.healthchecking.openshift.io created
customresourcedefinition.apiextensions.k8s.io/machines.machine.openshift.io created
customresourcedefinition.apiextensions.k8s.io/machinesets.machine.openshift.io created
customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created
customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created
serviceaccount/machine-api-controllers created
serviceaccount/machine-api-operator created
role.rbac.authorization.k8s.io/machine-api-controllers created
role.rbac.authorization.k8s.io/machine-api-operator created
role.rbac.authorization.k8s.io/prometheus-k8s-machine-api-operator created
clusterrole.rbac.authorization.k8s.io/machine-api-controllers created
clusterrole.rbac.authorization.k8s.io/machine-api-operator created
rolebinding.rbac.authorization.k8s.io/machine-api-controllers created
rolebinding.rbac.authorization.k8s.io/machine-api-operator created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s-machine-api-operator created
clusterrolebinding.rbac.authorization.k8s.io/machine-api-controllers created
clusterrolebinding.rbac.authorization.k8s.io/machine-api-operator created
configmap/machine-api-operator-images created
service/machine-api-operator created
deployment.apps/machine-api-operator created
clusteroperator.config.openshift.io/machine-api created
kubectl apply -f config/kubemark-config-infra.yaml
customresourcedefinition.apiextensions.k8s.io/infrastructures.config.openshift.io created
infrastructure.config.openshift.io/cluster created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY MACHINE API OPERATOR [00h 01m 16s] ##########
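Note: the `make deploy-kubemark` output above shows the three apply steps the target runs. A hedged recap of the equivalent manual invocation, inferred from the commands echoed in the log (paths relative to the machine-api-operator checkout; the Makefile may add flags not visible here):

# Sketch of the steps `make deploy-kubemark` performed above, run by hand.
cd /data/src/github.com/openshift/machine-api-operator
kustomize build config | sudo kubectl apply -f -          # kubemark-actuator namespace, RBAC, kubemark controllers
kustomize build | sudo kubectl apply -f -                 # openshift-machine-api namespace, CRDs, machine-api-operator
sudo kubectl apply -f config/kubemark-config-infra.yaml   # Infrastructure CR for the kubemark cluster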
[workspace] $ /bin/bash /tmp/jenkins6175358285139007283.sh
########## STARTING STAGE: DEPLOY CLUSTER AUTOSCALER OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.58cH83fdvX
+ cat
+ chmod +x /tmp/tmp.58cH83fdvX
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.58cH83fdvX openshiftdevel:/tmp/tmp.58cH83fdvX
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.58cH83fdvX"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-autoscaler-operator/
+ sudo imagebuilder -t quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator
--> COPY . .
--> ENV NO_DOCKER=1
--> ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator
--> RUN unset VERSION && make build
go build  -ldflags "-X github.com/openshift/cluster-autoscaler-operator/pkg/version.Raw=v0.0.0-213-g045aea4" -o "/go/bin/cluster-autoscaler-operator" "github.com/openshift/cluster-autoscaler-operator/cmd/manager"
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/
--> COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests
--> CMD ["/usr/bin/cluster-autoscaler-operator"]
--> LABEL io.openshift.release.operator true
--> Committing changes to quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 ...
--> Done
+ kustomize build
+ sudo kubectl apply -f -
customresourcedefinition.apiextensions.k8s.io/clusterautoscalers.autoscaling.openshift.io created
customresourcedefinition.apiextensions.k8s.io/machineautoscalers.autoscaling.openshift.io created
serviceaccount/cluster-autoscaler created
serviceaccount/cluster-autoscaler-operator created
role.rbac.authorization.k8s.io/cluster-autoscaler created
role.rbac.authorization.k8s.io/prometheus-k8s-cluster-autoscaler-operator created
role.rbac.authorization.k8s.io/cluster-autoscaler-operator created
clusterrole.rbac.authorization.k8s.io/cluster-autoscaler created
clusterrole.rbac.authorization.k8s.io/cluster-autoscaler-operator created
rolebinding.rbac.authorization.k8s.io/cluster-autoscaler created
rolebinding.rbac.authorization.k8s.io/prometheus-k8s-cluster-autoscaler-operator created
rolebinding.rbac.authorization.k8s.io/cluster-autoscaler-operator created
clusterrolebinding.rbac.authorization.k8s.io/cluster-autoscaler created
clusterrolebinding.rbac.authorization.k8s.io/cluster-autoscaler-operator created
configmap/cluster-autoscaler-operator-ca created
secret/cluster-autoscaler-operator-cert created
service/cluster-autoscaler-operator created
deployment.apps/cluster-autoscaler-operator created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER AUTOSCALER OPERATOR [00h 00m 46s] ##########
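Note: a quick sanity check one might run after this stage to confirm the operator and its CRDs landed; these are standard kubectl queries, not part of the job itself, and the deployment namespace is not shown in the log, hence the --all-namespaces filter:

sudo kubectl get deployments --all-namespaces | grep cluster-autoscaler-operator
sudo kubectl get crd clusterautoscalers.autoscaling.openshift.io machineautoscalers.autoscaling.openshift.io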
[workspace] $ /bin/bash /tmp/jenkins1038196153358262017.sh
########## STARTING STAGE: DEPLOY CLUSTER RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.6GuYxeusxe
+ cat
+ chmod +x /tmp/tmp.6GuYxeusxe
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.6GuYxeusxe openshiftdevel:/tmp/tmp.6GuYxeusxe
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.6GuYxeusxe"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo kubectl apply -f examples/machine-set.yaml
machineset.machine.openshift.io/kubemark-actuator-testing-machineset created
+ sudo kubectl apply -f examples/static-machine.yaml
machine.machine.openshift.io/minikube-static-machine created
+ sudo kubectl apply -f examples/worker-machinesets.yaml
machineset.machine.openshift.io/kubemark-actuator-testing-machineset-red created
machineset.machine.openshift.io/kubemark-actuator-testing-machineset-green created
machineset.machine.openshift.io/kubemark-actuator-testing-machineset-blue created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER RESOURCES [00h 00m 02s] ##########
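Note: this stage applies the three example manifests shown above. A possible follow-up to confirm the resulting MachineSets and Machines registered (plain kubectl queries, not part of the job):

sudo kubectl get machinesets.machine.openshift.io --all-namespaces
sudo kubectl get machines.machine.openshift.io --all-namespaces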
[workspace] $ /bin/bash /tmp/jenkins4150966887976521715.sh
########## STARTING STAGE: INSTALL GO 1.10.1 ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.m9osCtVlM5
+ cat
+ chmod +x /tmp/tmp.m9osCtVlM5
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.m9osCtVlM5 openshiftdevel:/tmp/tmp.m9osCtVlM5
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.m9osCtVlM5"'
+ cd /home/origin
+ mkdir -p /home/origin/bin
+ curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x /home/origin/bin/gimme
+ gimme 1.10.1

unset GOOS;
unset GOARCH;
export GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64';
export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}";
go version >&2;

export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env"
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 07s] ##########
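Note: gimme's usual invocation pattern is to eval its printed environment directly; the stage above instead sources the generated env file and copies the go binary to /bin. An equivalent shorter form, assuming gimme is already on PATH, would be:

eval "$(gimme 1.10.1)"
go version    # expected: go version go1.10.1 linux/amd64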
[workspace] $ /bin/bash /tmp/jenkins8056137623770307348.sh
########## STARTING STAGE: RUN E2E TESTS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.x4hFLtkOsu
+ cat
+ chmod +x /tmp/tmp.x4hFLtkOsu
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.x4hFLtkOsu openshiftdevel:/tmp/tmp.x4hFLtkOsu
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.x4hFLtkOsu"'
+ cd /home/origin
+ set +x
go version go1.10.1 linux/amd64
# Run operator tests first to preserve logs for troubleshooting test
# failures and flakes.
# Feature:Operators tests remove deployments, thus losing all the logs
# previously acquired.
NAMESPACE=kube-system hack/ci-integration.sh  -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" -ginkgo.failFast
=== RUN   TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1567785072
Will run 7 of 16 specs

SS
------------------------------
[Feature:Operators] Cluster autoscaler operator deployment should 
  be available
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:79
I0906 15:51:12.504507   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:51:12.548043   30802 deloyment.go:58] Deployment "cluster-autoscaler-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•SSSSSSS
------------------------------
[Feature:Operators] Cluster autoscaler operator should 
  reject invalid ClusterAutoscaler resources early via webhook
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:33
I0906 15:51:12.548191   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Cluster autoscaler operator should 
  reject invalid MachineAutoscaler resources early via webhook
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:49
I0906 15:51:12.583551   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Machine API operator deployment should 
  be available
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:18
I0906 15:51:12.603040   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:51:12.620398   30802 deloyment.go:58] Deployment "machine-api-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Machine API operator deployment should 
  reconcile controllers deployment
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:25
I0906 15:51:12.620467   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking deployment "machine-api-controllers" is available
I0906 15:51:12.633404   30802 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
STEP: deleting deployment "machine-api-controllers"
STEP: checking deployment "machine-api-controllers" is available again
E0906 15:51:12.642533   30802 deloyment.go:25] Error querying api for Deployment object "machine-api-controllers": deployments.apps "machine-api-controllers" not found, retrying...
E0906 15:51:13.645429   30802 deloyment.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1)
I0906 15:51:14.652796   30802 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Cluster autoscaler cluster operator status should 
  be available
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:90
I0906 15:51:14.652873   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Machine API cluster operator status should 
  be available
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:53
I0906 15:51:14.670509   30802 framework.go:406] >>> kubeConfig: /root/.kube/config
•
Ran 7 of 16 Specs in 2.182 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (2.18s)
PASS
ok  	github.com/openshift/cluster-api-actuator-pkg/pkg/e2e	2.240s
NAMESPACE=kube-system hack/ci-integration.sh  -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators|TechPreview" -ginkgo.failFast -ginkgo.seed=1
=== RUN   TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1
Will run 7 of 16 specs

SSSSSSSS
------------------------------
[Feature:Machines] Autoscaler should 
  scale up and down
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234
I0906 15:51:17.798305   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:51:17.803136   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:51:17.826238   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Getting existing machinesets
STEP: Getting existing machines
STEP: Getting existing nodes
I0906 15:51:17.839593   31205 autoscaler.go:286] Have 4 existing machinesets
I0906 15:51:17.839616   31205 autoscaler.go:287] Have 5 existing machines
I0906 15:51:17.839622   31205 autoscaler.go:288] Have 5 existing nodes
STEP: Creating 3 transient machinesets
STEP: [15m0s remaining] Waiting for nodes to be Ready in 3 transient machinesets
E0906 15:51:17.862011   31205 utils.go:157] Machine "e2e-29fe2-w-0-wjq5z" has no NodeRef
STEP: [14m57s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0906 15:51:20.871746   31205 utils.go:165] Machine "e2e-29fe2-w-0-wjq5z" is backing node "baf0662c-c75d-4915-b585-168375d3e877"
I0906 15:51:20.871770   31205 utils.go:149] MachineSet "e2e-29fe2-w-0" have 1 nodes
E0906 15:51:20.875721   31205 utils.go:157] Machine "e2e-29fe2-w-1-zh8bl" has no NodeRef
STEP: [14m54s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0906 15:51:23.882205   31205 utils.go:165] Machine "e2e-29fe2-w-0-wjq5z" is backing node "baf0662c-c75d-4915-b585-168375d3e877"
I0906 15:51:23.882235   31205 utils.go:149] MachineSet "e2e-29fe2-w-0" have 1 nodes
I0906 15:51:23.887807   31205 utils.go:165] Machine "e2e-29fe2-w-1-zh8bl" is backing node "3f5c1c24-bdd4-4aab-aaa3-62c71422f580"
I0906 15:51:23.887829   31205 utils.go:149] MachineSet "e2e-29fe2-w-1" have 1 nodes
I0906 15:51:23.892864   31205 utils.go:165] Machine "e2e-29fe2-w-2-6xt9z" is backing node "c94b17dc-568c-45c5-a83d-8a40e6ef47d2"
I0906 15:51:23.892887   31205 utils.go:149] MachineSet "e2e-29fe2-w-2" have 1 nodes
I0906 15:51:23.892895   31205 utils.go:177] Node "baf0662c-c75d-4915-b585-168375d3e877" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:20 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:20 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:20 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:20 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:20 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:51:23.892953   31205 utils.go:177] Node "3f5c1c24-bdd4-4aab-aaa3-62c71422f580" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:22 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:22 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:22 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:22 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:51:22 +0000 UTC 2019-09-06 15:51:22 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:51:23.893071   31205 utils.go:177] Node "c94b17dc-568c-45c5-a83d-8a40e6ef47d2" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:51:23 +0000 UTC 2019-09-06 15:51:21 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:51:23 +0000 UTC 2019-09-06 15:51:21 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:51:23 +0000 UTC 2019-09-06 15:51:21 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:51:23 +0000 UTC 2019-09-06 15:51:21 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:51:23 +0000 UTC 2019-09-06 15:51:21 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: Getting nodes
STEP: Creating 3 machineautoscalers
I0906 15:51:23.896153   31205 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-29fe2-w-0 - min:1, max:2
I0906 15:51:23.903413   31205 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-29fe2-w-1 - min:1, max:2
I0906 15:51:23.907148   31205 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-29fe2-w-2 - min:1, max:2
STEP: Creating ClusterAutoscaler configured with maxNodesTotal:10
STEP: Deriving Memory capacity from machine "kubemark-actuator-testing-machineset"
I0906 15:51:24.021565   31205 autoscaler.go:377] Memory capacity of worker node "13421fc4-81a1-45e2-b819-ab55a5e6869c" is 3840Mi
STEP: Creating scale-out workload: jobs: 11, memory: 2818572300
I0906 15:51:24.053685   31205 autoscaler.go:399] [15m0s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:51:25.011725   31205 autoscaler.go:361] cluster-autoscaler: cluster-autoscaler-default-598c649f66-ffktq became leader
I0906 15:51:27.053906   31205 autoscaler.go:399] [14m57s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:51:30.054076   31205 autoscaler.go:399] [14m54s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:51:33.054337   31205 autoscaler.go:399] [14m51s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:51:35.157265   31205 autoscaler.go:361] cluster-autoscaler-status: Max total nodes in cluster reached: 10
I0906 15:51:35.160626   31205 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-29fe2-w-1 size to 2
I0906 15:51:35.168294   31205 autoscaler.go:361] e2e-autoscaler-workload-vw6n7: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.170642   31205 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-29fe2-w-1 size set to 2
I0906 15:51:35.175691   31205 autoscaler.go:361] e2e-autoscaler-workload-xgx9k: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.178323   31205 autoscaler.go:361] e2e-autoscaler-workload-b5g5t: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.189429   31205 autoscaler.go:361] e2e-autoscaler-workload-wlk77: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.192968   31205 autoscaler.go:361] e2e-autoscaler-workload-sf2vz: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.199674   31205 autoscaler.go:361] e2e-autoscaler-workload-xpml9: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.204102   31205 autoscaler.go:361] e2e-autoscaler-workload-f4ql7: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:35.357804   31205 autoscaler.go:361] e2e-autoscaler-workload-nndtb: pod triggered scale-up: [{kube-system/e2e-29fe2-w-1 1->2 (max: 2)}]
I0906 15:51:36.054561   31205 autoscaler.go:399] [14m48s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:51:39.054808   31205 autoscaler.go:399] [14m45s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:51:42.055678   31205 autoscaler.go:399] [14m42s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:51:45.055896   31205 autoscaler.go:399] [14m39s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:51:45.181499   31205 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-29fe2-w-0 size to 2
I0906 15:51:45.198986   31205 autoscaler.go:361] e2e-autoscaler-workload-nndtb: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.208041   31205 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-29fe2-w-0 size set to 2
I0906 15:51:45.215316   31205 autoscaler.go:361] e2e-autoscaler-workload-wlk77: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.221284   31205 autoscaler.go:361] e2e-autoscaler-workload-f4ql7: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.230159   31205 autoscaler.go:361] e2e-autoscaler-workload-vw6n7: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.247487   31205 autoscaler.go:361] e2e-autoscaler-workload-xgx9k: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.258599   31205 autoscaler.go:361] e2e-autoscaler-workload-b5g5t: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:45.262247   31205 autoscaler.go:361] e2e-autoscaler-workload-sf2vz: pod triggered scale-up: [{kube-system/e2e-29fe2-w-0 1->2 (max: 2)}]
I0906 15:51:48.057447   31205 autoscaler.go:399] [14m36s remaining] Expecting 2 "ScaledUpGroup" events; observed 2
I0906 15:51:48.058204   31205 autoscaler.go:414] [1m0s remaining] Waiting for cluster-autoscaler to generate a "MaxNodesTotalReached" event; observed 1
I0906 15:51:48.058233   31205 autoscaler.go:422] [1m0s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:51:51.058384   31205 autoscaler.go:422] [57s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:51:54.058677   31205 autoscaler.go:422] [54s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:51:57.058904   31205 autoscaler.go:422] [51s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:00.059109   31205 autoscaler.go:422] [48s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:03.059417   31205 autoscaler.go:422] [45s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:06.059636   31205 autoscaler.go:422] [42s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:09.059807   31205 autoscaler.go:422] [39s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:12.060053   31205 autoscaler.go:422] [36s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:15.060269   31205 autoscaler.go:422] [33s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:18.060526   31205 autoscaler.go:422] [30s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:21.060765   31205 autoscaler.go:422] [27s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:24.060983   31205 autoscaler.go:422] [24s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:27.061172   31205 autoscaler.go:422] [21s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:30.061412   31205 autoscaler.go:422] [18s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:33.061604   31205 autoscaler.go:422] [15s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:36.061817   31205 autoscaler.go:422] [12s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:39.062069   31205 autoscaler.go:422] [9s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:42.062292   31205 autoscaler.go:422] [6s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:52:45.062574   31205 autoscaler.go:422] [3s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
STEP: Deleting workload
I0906 15:52:48.058399   31205 autoscaler.go:249] [cleanup] "e2e-autoscaler-workload" (*v1.Job)
I0906 15:52:48.062005   31205 autoscaler.go:434] [15m0s remaining] Expecting 2 "ScaleDownEmpty" events; observed 2
I0906 15:52:48.079623   31205 autoscaler.go:445] still have workload POD: "e2e-autoscaler-workload-28ddq"
I0906 15:52:48.079657   31205 autoscaler.go:249] [cleanup] "default" (*v1.ClusterAutoscaler)
I0906 15:52:48.136581   31205 autoscaler.go:465] Waiting for cluster-autoscaler POD "cluster-autoscaler-default-598c649f66-ffktq" to disappear
STEP: Scaling transient machinesets to zero
I0906 15:52:48.136654   31205 autoscaler.go:474] Scaling transient machineset "e2e-29fe2-w-0" to zero
I0906 15:52:48.145148   31205 autoscaler.go:474] Scaling transient machineset "e2e-29fe2-w-1" to zero
I0906 15:52:48.162610   31205 autoscaler.go:474] Scaling transient machineset "e2e-29fe2-w-2" to zero
STEP: Waiting for scaled up nodes to be deleted
I0906 15:52:48.197229   31205 autoscaler.go:491] [15m0s remaining] Waiting for cluster to reach original node count of 5; currently have 10
I0906 15:52:51.200770   31205 autoscaler.go:491] [14m57s remaining] Waiting for cluster to reach original node count of 5; currently have 6
I0906 15:52:54.204030   31205 autoscaler.go:491] [14m54s remaining] Waiting for cluster to reach original node count of 5; currently have 5
STEP: Waiting for scaled up machines to be deleted
I0906 15:52:54.207891   31205 autoscaler.go:501] [15m0s remaining] Waiting for cluster to reach original machine count of 5; currently have 5
I0906 15:52:54.207933   31205 autoscaler.go:249] [cleanup] "autoscale-e2e-29fe2-w-0pvc9x" (*v1beta1.MachineAutoscaler)
I0906 15:52:54.211122   31205 autoscaler.go:249] [cleanup] "autoscale-e2e-29fe2-w-12bgd5" (*v1beta1.MachineAutoscaler)
I0906 15:52:54.214499   31205 autoscaler.go:249] [cleanup] "autoscale-e2e-29fe2-w-27d6rb" (*v1beta1.MachineAutoscaler)
I0906 15:52:54.220025   31205 autoscaler.go:249] [cleanup] "e2e-29fe2-w-0" (*v1beta1.MachineSet)
I0906 15:52:54.223460   31205 autoscaler.go:249] [cleanup] "e2e-29fe2-w-1" (*v1beta1.MachineSet)
I0906 15:52:54.228116   31205 autoscaler.go:249] [cleanup] "e2e-29fe2-w-2" (*v1beta1.MachineSet)

• [SLOW TEST:96.433 seconds]
[Feature:Machines] Autoscaler should
/data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:233
  scale up and down
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234
------------------------------
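Note: the scale up/down spec above drives the autoscaler through three MachineAutoscalers (min 1, max 2 each, per the "Create MachineAutoscaler" lines) and a ClusterAutoscaler named "default" capped at maxNodesTotal 10. A minimal sketch of those resources, assuming the usual autoscaling.openshift.io field names (minReplicas/maxReplicas, scaleTargetRef, resourceLimits.maxNodesTotal) and using a hypothetical MachineAutoscaler name derived from one of the transient MachineSets in the log:

sudo kubectl apply -f - <<'EOF'
apiVersion: autoscaling.openshift.io/v1
kind: ClusterAutoscaler
metadata:
  name: default
spec:
  resourceLimits:
    maxNodesTotal: 10
---
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: autoscale-e2e-29fe2-w-0    # hypothetical name; the test generates its own
  namespace: kube-system
spec:
  minReplicas: 1
  maxReplicas: 2
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: e2e-29fe2-w-0
EOF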
S
------------------------------
[Feature:Machines] Managed cluster should 
  have machines linked with nodes
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:136
I0906 15:52:54.231648   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:52:54.249245   31205 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:52:54.249273   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-q2n9r" is linked to node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
I0906 15:52:54.249288   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2kj9n" is linked to node "f6279809-ed73-41b2-8127-87cfb777a67a"
I0906 15:52:54.249301   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-h4t9b" is linked to node "de17a193-07be-4165-bf27-a3510a938d6e"
I0906 15:52:54.249318   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-ff79d" is linked to node "fb6245e1-f356-4fab-9152-f02e39073964"
I0906 15:52:54.249327   31205 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
•
------------------------------
[Feature:Machines] Managed cluster should 
  have ability to additively reconcile taints from machine to nodes
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:145
I0906 15:52:54.249431   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: getting machine "kubemark-actuator-testing-machineset-blue-q2n9r"
I0906 15:52:54.271595   31205 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-q2n9r" is backing node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
STEP: getting the backed node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
STEP: updating node "13421fc4-81a1-45e2-b819-ab55a5e6869c" with taint: {not-from-machine true NoSchedule <nil>}
STEP: updating machine "kubemark-actuator-testing-machineset-blue-q2n9r" with taint: {from-machine-63791ce9-d0be-11e9-953e-0a2b6dea9fc2 true NoSchedule <nil>}
I0906 15:52:54.281984   31205 infra.go:184] Getting node from machine again for verification of taints
I0906 15:52:54.285206   31205 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-q2n9r" is backing node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
I0906 15:52:54.285231   31205 infra.go:194] Expected : map[from-machine-63791ce9-d0be-11e9-953e-0a2b6dea9fc2:{} not-from-machine:{}], observed map[kubemark:{} not-from-machine:{} from-machine-63791ce9-d0be-11e9-953e-0a2b6dea9fc2:{}] , difference map[], 
STEP: Getting the latest version of the original machine
STEP: Setting back the original machine taints
STEP: Getting the latest version of the node
I0906 15:52:54.294018   31205 utils.go:165] Machine "kubemark-actuator-testing-machineset-blue-q2n9r" is backing node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
STEP: Setting back the original node taints
•
------------------------------
[Feature:Machines] Managed cluster should 
  recover from deleted worker machines
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
I0906 15:52:54.297187   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking initial cluster state
I0906 15:52:54.317831   31205 utils.go:87] Cluster size is 5 nodes
I0906 15:52:54.317864   31205 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:52:54.321331   31205 utils.go:99] MachineSet "e2e-29fe2-w-0" replicas 0. Ready: 0, available 0
I0906 15:52:54.321354   31205 utils.go:99] MachineSet "e2e-29fe2-w-1" replicas 0. Ready: 0, available 0
I0906 15:52:54.321363   31205 utils.go:99] MachineSet "e2e-29fe2-w-2" replicas 0. Ready: 0, available 0
I0906 15:52:54.321371   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:52:54.321379   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:52:54.321388   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:52:54.321408   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:52:54.323804   31205 utils.go:231] Node "13421fc4-81a1-45e2-b819-ab55a5e6869c". Ready: true. Unschedulable: false
I0906 15:52:54.323825   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:52:54.323831   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:52:54.323836   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:52:54.323841   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:52:54.326244   31205 utils.go:87] Cluster size is 5 nodes
I0906 15:52:54.326261   31205 utils.go:257] waiting for all nodes to be ready
I0906 15:52:54.329147   31205 utils.go:262] waiting for all nodes to be schedulable
I0906 15:52:54.332376   31205 utils.go:290] [remaining 1m0s] Node "13421fc4-81a1-45e2-b819-ab55a5e6869c" is schedulable
I0906 15:52:54.332394   31205 utils.go:290] [remaining 1m0s] Node "de17a193-07be-4165-bf27-a3510a938d6e" is schedulable
I0906 15:52:54.332401   31205 utils.go:290] [remaining 1m0s] Node "f6279809-ed73-41b2-8127-87cfb777a67a" is schedulable
I0906 15:52:54.332412   31205 utils.go:290] [remaining 1m0s] Node "fb6245e1-f356-4fab-9152-f02e39073964" is schedulable
I0906 15:52:54.332422   31205 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:52:54.332434   31205 utils.go:267] waiting for each node to be backed by a machine
I0906 15:52:54.338337   31205 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:52:54.338364   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-q2n9r" is linked to node "13421fc4-81a1-45e2-b819-ab55a5e6869c"
I0906 15:52:54.338374   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2kj9n" is linked to node "f6279809-ed73-41b2-8127-87cfb777a67a"
I0906 15:52:54.338382   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-h4t9b" is linked to node "de17a193-07be-4165-bf27-a3510a938d6e"
I0906 15:52:54.338389   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-ff79d" is linked to node "fb6245e1-f356-4fab-9152-f02e39073964"
I0906 15:52:54.338397   31205 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
STEP: getting worker node
STEP: deleting machine object "kubemark-actuator-testing-machineset-blue-q2n9r"
STEP: waiting for node object "13421fc4-81a1-45e2-b819-ab55a5e6869c" to go away
I0906 15:52:54.351340   31205 infra.go:255] Node "13421fc4-81a1-45e2-b819-ab55a5e6869c" still exists. Node conditions are: [{OutOfDisk False 2019-09-06 15:52:53 +0000 UTC 2019-09-06 15:50:28 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:52:53 +0000 UTC 2019-09-06 15:50:28 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:52:53 +0000 UTC 2019-09-06 15:50:28 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:52:53 +0000 UTC 2019-09-06 15:50:28 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:52:53 +0000 UTC 2019-09-06 15:50:28 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: waiting for new node object to come up
I0906 15:52:59.356163   31205 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:52:59.359372   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:52:59.359397   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:52:59.359404   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:52:59.359410   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:52:59.362047   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:52:59.362070   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:52:59.362080   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:52:59.362088   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:52:59.362096   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:52:59.364796   31205 utils.go:87] Cluster size is 5 nodes
I0906 15:52:59.364819   31205 utils.go:257] waiting for all nodes to be ready
I0906 15:52:59.368341   31205 utils.go:262] waiting for all nodes to be schedulable
I0906 15:52:59.372408   31205 utils.go:290] [remaining 1m0s] Node "dbe55a04-5eb1-42df-ae80-846f79da969a" is schedulable
I0906 15:52:59.372431   31205 utils.go:290] [remaining 1m0s] Node "de17a193-07be-4165-bf27-a3510a938d6e" is schedulable
I0906 15:52:59.372438   31205 utils.go:290] [remaining 1m0s] Node "f6279809-ed73-41b2-8127-87cfb777a67a" is schedulable
I0906 15:52:59.372444   31205 utils.go:290] [remaining 1m0s] Node "fb6245e1-f356-4fab-9152-f02e39073964" is schedulable
I0906 15:52:59.372450   31205 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:52:59.372455   31205 utils.go:267] waiting for each node to be backed by a machine
I0906 15:52:59.381314   31205 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:52:59.381344   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-c54hf" is linked to node "dbe55a04-5eb1-42df-ae80-846f79da969a"
I0906 15:52:59.381354   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2kj9n" is linked to node "f6279809-ed73-41b2-8127-87cfb777a67a"
I0906 15:52:59.381362   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-h4t9b" is linked to node "de17a193-07be-4165-bf27-a3510a938d6e"
I0906 15:52:59.381370   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-ff79d" is linked to node "fb6245e1-f356-4fab-9152-f02e39073964"
I0906 15:52:59.381378   31205 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"

• [SLOW TEST:5.084 seconds]
[Feature:Machines] Managed cluster should
/data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  recover from deleted worker machines
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
------------------------------
[Feature:Machines] Managed cluster should 
  grow and decrease when scaling different machineSets simultaneously
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
I0906 15:52:59.381476   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0906 15:52:59.397353   31205 utils.go:87] Cluster size is 5 nodes
STEP: getting worker machineSets
I0906 15:52:59.400298   31205 infra.go:297] Creating transient MachineSet "e2e-66871-w-0"
I0906 15:52:59.403194   31205 infra.go:297] Creating transient MachineSet "e2e-66871-w-1"
STEP: scaling "e2e-66871-w-0" from 0 to 2 replicas
I0906 15:52:59.407065   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-66871-w-1" from 0 to 2 replicas
I0906 15:52:59.427226   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
E0906 15:52:59.467584   31205 utils.go:157] Machine "e2e-66871-w-0-7pnmc" has no NodeRef
I0906 15:53:04.519443   31205 utils.go:165] Machine "e2e-66871-w-0-7pnmc" is backing node "5f623183-6d0a-4623-8aaf-40106f8e4c82"
E0906 15:53:04.520189   31205 utils.go:157] Machine "e2e-66871-w-0-8mf5p" has no NodeRef
I0906 15:53:09.527000   31205 utils.go:165] Machine "e2e-66871-w-0-7pnmc" is backing node "5f623183-6d0a-4623-8aaf-40106f8e4c82"
I0906 15:53:09.529387   31205 utils.go:165] Machine "e2e-66871-w-0-8mf5p" is backing node "dc4e2890-f2e8-44ec-9f83-2b029b3c40c5"
I0906 15:53:09.529411   31205 utils.go:149] MachineSet "e2e-66871-w-0" have 2 nodes
I0906 15:53:09.535078   31205 utils.go:165] Machine "e2e-66871-w-1-5mc8v" is backing node "83b89346-0d2b-4efb-8058-c8aebddc8542"
I0906 15:53:09.536884   31205 utils.go:165] Machine "e2e-66871-w-1-nx7px" is backing node "76df007a-03ab-40b0-a473-d29caacf0c6b"
I0906 15:53:09.536910   31205 utils.go:149] MachineSet "e2e-66871-w-1" have 2 nodes
I0906 15:53:09.536920   31205 utils.go:177] Node "5f623183-6d0a-4623-8aaf-40106f8e4c82" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:02 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:02 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:02 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:02 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:02 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:53:09.537003   31205 utils.go:177] Node "dc4e2890-f2e8-44ec-9f83-2b029b3c40c5" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:53:07 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:53:07 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:53:07 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:53:07 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:53:07 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:53:09.537037   31205 utils.go:177] Node "83b89346-0d2b-4efb-8058-c8aebddc8542" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:04 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:04 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:04 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:04 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:53:08 +0000 UTC 2019-09-06 15:53:04 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:53:09.537059   31205 utils.go:177] Node "76df007a-03ab-40b0-a473-d29caacf0c6b" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:53:09 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:53:09 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:53:09 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:53:09 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:53:09 +0000 UTC 2019-09-06 15:53:03 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: scaling "e2e-66871-w-0" from 2 to 0 replicas
I0906 15:53:09.537109   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-66871-w-1" from 2 to 0 replicas
I0906 15:53:09.558313   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: waiting for cluster to get back to original size. Final size should be 5 nodes
I0906 15:53:09.591717   31205 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:53:09.598372   31205 utils.go:99] MachineSet "e2e-66871-w-0" replicas 0. Ready: 2, available 2
I0906 15:53:09.598399   31205 utils.go:99] MachineSet "e2e-66871-w-1" replicas 0. Ready: 2, available 2
I0906 15:53:09.598408   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:53:09.598416   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:53:09.598425   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:53:09.598433   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:53:09.603322   31205 utils.go:231] Node "5f623183-6d0a-4623-8aaf-40106f8e4c82". Ready: true. Unschedulable: false
I0906 15:53:09.603352   31205 utils.go:231] Node "76df007a-03ab-40b0-a473-d29caacf0c6b". Ready: true. Unschedulable: false
I0906 15:53:09.603362   31205 utils.go:231] Node "83b89346-0d2b-4efb-8058-c8aebddc8542". Ready: true. Unschedulable: false
I0906 15:53:09.603370   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:53:09.603379   31205 utils.go:231] Node "dc4e2890-f2e8-44ec-9f83-2b029b3c40c5". Ready: true. Unschedulable: true
I0906 15:53:09.603388   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:53:09.603406   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:53:09.603415   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:53:09.603423   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:53:09.607374   31205 utils.go:87] Cluster size is 9 nodes
I0906 15:53:14.607531   31205 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0906 15:53:14.611809   31205 utils.go:99] MachineSet "e2e-66871-w-0" replicas 0. Ready: 0, available 0
I0906 15:53:14.611833   31205 utils.go:99] MachineSet "e2e-66871-w-1" replicas 0. Ready: 0, available 0
I0906 15:53:14.611839   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:53:14.611845   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:53:14.611850   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:53:14.611855   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:53:14.616847   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:53:14.616874   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:53:14.616883   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:53:14.616891   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:53:14.616899   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:53:14.623395   31205 utils.go:87] Cluster size is 5 nodes
I0906 15:53:14.623426   31205 utils.go:257] waiting for all nodes to be ready
I0906 15:53:14.627403   31205 utils.go:262] waiting for all nodes to be schedulable
I0906 15:53:14.633097   31205 utils.go:290] [remaining 1m0s] Node "dbe55a04-5eb1-42df-ae80-846f79da969a" is schedulable
I0906 15:53:14.633134   31205 utils.go:290] [remaining 1m0s] Node "de17a193-07be-4165-bf27-a3510a938d6e" is schedulable
I0906 15:53:14.633146   31205 utils.go:290] [remaining 1m0s] Node "f6279809-ed73-41b2-8127-87cfb777a67a" is schedulable
I0906 15:53:14.633156   31205 utils.go:290] [remaining 1m0s] Node "fb6245e1-f356-4fab-9152-f02e39073964" is schedulable
I0906 15:53:14.633166   31205 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:53:14.633176   31205 utils.go:267] waiting for each node to be backed by a machine
I0906 15:53:14.647933   31205 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:53:14.647971   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-c54hf" is linked to node "dbe55a04-5eb1-42df-ae80-846f79da969a"
I0906 15:53:14.648020   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2kj9n" is linked to node "f6279809-ed73-41b2-8127-87cfb777a67a"
I0906 15:53:14.648034   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-h4t9b" is linked to node "de17a193-07be-4165-bf27-a3510a938d6e"
I0906 15:53:14.648047   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-ff79d" is linked to node "fb6245e1-f356-4fab-9152-f02e39073964"
I0906 15:53:14.648061   31205 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"

• [SLOW TEST:15.288 seconds]
[Feature:Machines] Managed cluster should
/data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  grow and decrease when scaling different machineSets simultaneously
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
------------------------------
[Feature:Machines] Managed cluster should 
  drain node before removing machine resource
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
I0906 15:53:14.669376   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0906 15:53:14.695637   31205 utils.go:87] Cluster size is 5 nodes
STEP: Taking the first worker machineset (assuming only worker machines are backed by machinesets)
STEP: Creating two new machines, one for node about to be drained, other for moving workload from drained node
STEP: Waiting until both new nodes are ready
E0906 15:53:14.722237   31205 utils.go:342] [remaining 15m0s] Expecting 2 nodes with map[string]string{"node-draining-test":"29f771af-d0be-11e9-953e-0a2b6dea9fc2", "node-role.kubernetes.io/worker":""} labels in Ready state, got 0
I0906 15:53:19.725948   31205 utils.go:346] [14m55s remaining] Expected number (2) of nodes with map[node-role.kubernetes.io/worker: node-draining-test:29f771af-d0be-11e9-953e-0a2b6dea9fc2] label in Ready state found
STEP: Creating RC with workload
STEP: Creating PDB for RC
STEP: Wait until all replicas are ready
I0906 15:53:19.763639   31205 utils.go:396] [15m0s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 0
I0906 15:53:24.766554   31205 utils.go:396] [14m55s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 20
I0906 15:53:29.768951   31205 utils.go:399] [14m50s remaining] Waiting for RC ready replicas, ReadyReplicas: 20, Replicas: 20
I0906 15:53:29.777181   31205 utils.go:416] POD #0/20: {
  "metadata": {
    "name": "pdb-workload-6mzsg",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-6mzsg",
    "uid": "72afe262-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3928",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.242.115.158",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://d5fe32c9cf0ea20e"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777360   31205 utils.go:416] POD #1/20: {
  "metadata": {
    "name": "pdb-workload-6xfbb",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-6xfbb",
    "uid": "72b0398a-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3878",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.218.158.209",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://b8fe0f23db96470a"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777467   31205 utils.go:416] POD #2/20: {
  "metadata": {
    "name": "pdb-workload-7h78b",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-7h78b",
    "uid": "72b02bd5-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3913",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.3.202.174",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://dfba837a0b789be4"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777568   31205 utils.go:416] POD #3/20: {
  "metadata": {
    "name": "pdb-workload-8gznh",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-8gznh",
    "uid": "72aac801-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3855",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:25Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.42.167.43",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://b3e49d5a53dffa00"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777671   31205 utils.go:416] POD #4/20: {
  "metadata": {
    "name": "pdb-workload-96snx",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-96snx",
    "uid": "72abffe7-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3862",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.0.145.182",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://e4ae83e9e4297ca"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777806   31205 utils.go:416] POD #5/20: {
  "metadata": {
    "name": "pdb-workload-c6pgd",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-c6pgd",
    "uid": "72a7e106-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3846",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:25Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.226.107.89",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://f89a9b7dbd75c70f"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.777906   31205 utils.go:416] POD #6/20: {
  "metadata": {
    "name": "pdb-workload-cksdb",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-cksdb",
    "uid": "72abebeb-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3865",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.127.41.233",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://5a0ef37224f0bf9b"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.778359   31205 utils.go:416] POD #7/20: {
  "metadata": {
    "name": "pdb-workload-jsrjw",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-jsrjw",
    "uid": "72b0025c-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3887",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.179.140.209",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://48f8cb6fd4f781a7"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.778483   31205 utils.go:416] POD #8/20: {
  "metadata": {
    "name": "pdb-workload-l7296",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-l7296",
    "uid": "72aa8cc5-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3919",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.176.46.224",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://9c151bb46379b142"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.778626   31205 utils.go:416] POD #9/20: {
  "metadata": {
    "name": "pdb-workload-mxzsb",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-mxzsb",
    "uid": "72b3fe59-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3852",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:25Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.255.206.213",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://19cbe1bbdf180c2b"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.778753   31205 utils.go:416] POD #10/20: {
  "metadata": {
    "name": "pdb-workload-n5757",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-n5757",
    "uid": "72b3fb06-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3922",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.153.214.93",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://59ff0eb97fbda054"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.778856   31205 utils.go:416] POD #11/20: {
  "metadata": {
    "name": "pdb-workload-qq4kz",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-qq4kz",
    "uid": "72ac07ef-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3884",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.219.194.167",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://e9df851cec3002f4"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779010   31205 utils.go:416] POD #12/20: {
  "metadata": {
    "name": "pdb-workload-r94jg",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-r94jg",
    "uid": "72b31904-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3925",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.8.249.41",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://1a773eaec329fda"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779143   31205 utils.go:416] POD #13/20: {
  "metadata": {
    "name": "pdb-workload-rfdnd",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-rfdnd",
    "uid": "72b02ad2-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3890",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.208.17.53",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:23Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://ba62090ab2dd7c88"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779254   31205 utils.go:416] POD #14/20: {
  "metadata": {
    "name": "pdb-workload-rs82d",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-rs82d",
    "uid": "72b062db-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3916",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.42.204.186",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://70f6430ab35af1e6"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779361   31205 utils.go:416] POD #15/20: {
  "metadata": {
    "name": "pdb-workload-s6c5q",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-s6c5q",
    "uid": "72b3516f-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3910",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.165.9.213",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://6e2a950df3c36e12"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779483   31205 utils.go:416] POD #16/20: {
  "metadata": {
    "name": "pdb-workload-s7p5x",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-s7p5x",
    "uid": "72b3f424-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3893",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.131.27.249",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://e68b96c35f64ad49"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779639   31205 utils.go:416] POD #17/20: {
  "metadata": {
    "name": "pdb-workload-tlvcg",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-tlvcg",
    "uid": "72b04e0b-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3907",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:27Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.23",
    "podIP": "10.168.129.169",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://a0f729110fb6b284"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779785   31205 utils.go:416] POD #18/20: {
  "metadata": {
    "name": "pdb-workload-twsg7",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-twsg7",
    "uid": "72ac0a92-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3849",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:25Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.56.25.63",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:25Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://b7627cccb9e4ad23"
      }
    ],
    "qosClass": "Burstable"
  }
}
I0906 15:53:29.779924   31205 utils.go:416] POD #19/20: {
  "metadata": {
    "name": "pdb-workload-vg7x7",
    "generateName": "pdb-workload-",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/pdb-workload-vg7x7",
    "uid": "72b057e2-d0be-11e9-81a4-0a2b6dea9fc2",
    "resourceVersion": "3881",
    "creationTimestamp": "2019-09-06T15:53:19Z",
    "labels": {
      "app": "nginx"
    },
    "ownerReferences": [
      {
        "apiVersion": "v1",
        "kind": "ReplicationController",
        "name": "pdb-workload",
        "uid": "72a5ba31-d0be-11e9-81a4-0a2b6dea9fc2",
        "controller": true,
        "blockOwnerDeletion": true
      }
    ]
  },
  "spec": {
    "volumes": [
      {
        "name": "default-token-8zl7s",
        "secret": {
          "secretName": "default-token-8zl7s",
          "defaultMode": 420
        }
      }
    ],
    "containers": [
      {
        "name": "work",
        "image": "busybox",
        "command": [
          "sleep",
          "10h"
        ],
        "resources": {
          "requests": {
            "cpu": "50m",
            "memory": "50Mi"
          }
        },
        "volumeMounts": [
          {
            "name": "default-token-8zl7s",
            "readOnly": true,
            "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
          }
        ],
        "terminationMessagePath": "/dev/termination-log",
        "terminationMessagePolicy": "File",
        "imagePullPolicy": "Always"
      }
    ],
    "restartPolicy": "Always",
    "terminationGracePeriodSeconds": 30,
    "dnsPolicy": "ClusterFirst",
    "nodeSelector": {
      "node-draining-test": "29f771af-d0be-11e9-953e-0a2b6dea9fc2",
      "node-role.kubernetes.io/worker": ""
    },
    "serviceAccountName": "default",
    "serviceAccount": "default",
    "nodeName": "984dc528-9f2c-48f8-a73c-92d338000376",
    "securityContext": {},
    "schedulerName": "default-scheduler",
    "tolerations": [
      {
        "key": "kubemark",
        "operator": "Exists"
      },
      {
        "key": "node.kubernetes.io/not-ready",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      },
      {
        "key": "node.kubernetes.io/unreachable",
        "operator": "Exists",
        "effect": "NoExecute",
        "tolerationSeconds": 300
      }
    ],
    "priority": 0
  },
  "status": {
    "phase": "Running",
    "conditions": [
      {
        "type": "Initialized",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      },
      {
        "type": "Ready",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:26Z"
      },
      {
        "type": "ContainersReady",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": null
      },
      {
        "type": "PodScheduled",
        "status": "True",
        "lastProbeTime": null,
        "lastTransitionTime": "2019-09-06T15:53:19Z"
      }
    ],
    "hostIP": "172.17.0.22",
    "podIP": "10.181.241.206",
    "startTime": "2019-09-06T15:53:19Z",
    "containerStatuses": [
      {
        "name": "work",
        "state": {
          "running": {
            "startedAt": "2019-09-06T15:53:24Z"
          }
        },
        "lastState": {},
        "ready": true,
        "restartCount": 0,
        "image": "busybox:latest",
        "imageID": "docker://busybox:latest",
        "containerID": "docker://5d02b2778d1b20e9"
      }
    ],
    "qosClass": "Burstable"
  }
}
STEP: Delete machine to trigger node draining
STEP: Observing and verifying node draining
E0906 15:53:29.794226   31205 utils.go:451] Node "984dc528-9f2c-48f8-a73c-92d338000376" is expected to be marked as unschedulable, but it is not
I0906 15:53:34.798476   31205 utils.go:455] [remaining 14m55s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:34.804745   31205 utils.go:474] [remaining 14m55s] Have 9 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:34.806365   31205 utils.go:490] [remaining 14m55s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:34.806391   31205 utils.go:500] [remaining 14m55s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 9
I0906 15:53:39.810053   31205 utils.go:455] [remaining 14m50s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:39.824570   31205 utils.go:474] [remaining 14m50s] Have 8 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:39.831813   31205 utils.go:490] [remaining 14m50s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:39.831843   31205 utils.go:500] [remaining 14m50s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 8
I0906 15:53:44.799299   31205 utils.go:455] [remaining 14m45s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:44.806334   31205 utils.go:474] [remaining 14m45s] Have 7 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:44.808008   31205 utils.go:490] [remaining 14m45s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:44.808030   31205 utils.go:500] [remaining 14m45s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 7
I0906 15:53:49.800777   31205 utils.go:455] [remaining 14m40s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:49.810625   31205 utils.go:474] [remaining 14m40s] Have 6 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:49.812716   31205 utils.go:490] [remaining 14m40s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:49.812744   31205 utils.go:500] [remaining 14m40s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 6
I0906 15:53:54.798419   31205 utils.go:455] [remaining 14m35s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:54.804692   31205 utils.go:474] [remaining 14m35s] Have 5 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:54.806344   31205 utils.go:490] [remaining 14m35s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:54.806370   31205 utils.go:500] [remaining 14m35s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 5
I0906 15:53:59.799072   31205 utils.go:455] [remaining 14m30s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:53:59.805632   31205 utils.go:474] [remaining 14m30s] Have 4 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:53:59.807192   31205 utils.go:490] [remaining 14m30s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:53:59.807215   31205 utils.go:500] [remaining 14m30s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 4
I0906 15:54:04.798543   31205 utils.go:455] [remaining 14m25s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:54:04.805194   31205 utils.go:474] [remaining 14m25s] Have 3 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:54:04.806905   31205 utils.go:490] [remaining 14m25s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:54:04.806934   31205 utils.go:500] [remaining 14m25s] Expecting at most 2 pods to be scheduled to drained node "984dc528-9f2c-48f8-a73c-92d338000376", got 3
I0906 15:54:09.799172   31205 utils.go:455] [remaining 14m20s] Node "984dc528-9f2c-48f8-a73c-92d338000376" is marked unschedulable as expected
I0906 15:54:09.805313   31205 utils.go:474] [remaining 14m20s] Have 2 pods scheduled to node "984dc528-9f2c-48f8-a73c-92d338000376"
I0906 15:54:09.807006   31205 utils.go:490] [remaining 14m20s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:54:09.807030   31205 utils.go:504] [remaining 14m20s] Expected result: all pods from the RC, except at most the last one or two, were rescheduled to a different node while respecting the PDB
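The loop above checks three things at once: the drained node stays cordoned, the number of pods still bound to it falls toward the at-most-2 floor allowed while the PDB is honoured, and the RC keeps all 20 replicas ready. A rough kubectl sketch of the same checks (node and RC names taken from the log above; these commands are illustrative and not part of the test suite):

NODE=984dc528-9f2c-48f8-a73c-92d338000376   # drained node, copied from the log above
# cordoned node: spec.unschedulable should print "true"
kubectl get node "$NODE" -o jsonpath='{.spec.unschedulable}{"\n"}'
# pods still bound to the drained node; the count should trend down to <= 2
kubectl get pods -n default --field-selector spec.nodeName="$NODE" --no-headers | wc -l
# the RC should keep 20/20 replicas ready while pods are evicted elsewhere
kubectl get rc pdb-workload -n default -o jsonpath='{.status.readyReplicas}/{.spec.replicas}{"\n"}'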
STEP: Validating the machine is deleted
E0906 15:54:09.808635   31205 infra.go:454] Machine "machine1" not yet deleted
E0906 15:54:14.810892   31205 infra.go:454] Machine "machine1" not yet deleted
I0906 15:54:19.810914   31205 infra.go:463] Machine "machine1" successfully deleted
STEP: Validate underlying node corresponding to machine1 is removed as well
I0906 15:54:19.812678   31205 utils.go:530] [15m0s remaining] Node "984dc528-9f2c-48f8-a73c-92d338000376" successfully deleted
STEP: Delete PDB
STEP: Delete machine2
STEP: waiting for cluster to get back to original size. Final size should be 5 nodes
I0906 15:54:19.820252   31205 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:54:19.825376   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:54:19.825396   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:54:19.825402   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:54:19.825408   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:54:19.828772   31205 utils.go:231] Node "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf". Ready: true. Unschedulable: false
I0906 15:54:19.828795   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:54:19.828804   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:54:19.828812   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:54:19.828820   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:54:19.828828   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:54:19.835160   31205 utils.go:87] Cluster size is 6 nodes
I0906 15:54:24.835346   31205 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0906 15:54:24.839236   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:54:24.839265   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:54:24.839276   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:54:24.839285   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:54:24.843463   31205 utils.go:231] Node "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf". Ready: true. Unschedulable: true
I0906 15:54:24.843490   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:54:24.843500   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:54:24.843509   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:54:24.843518   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:54:24.843526   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:54:24.847143   31205 utils.go:87] Cluster size is 6 nodes
I0906 15:54:29.835385   31205 utils.go:239] [remaining 14m50s] Cluster size expected to be 5 nodes
I0906 15:54:29.838538   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:54:29.838561   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:54:29.838568   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:54:29.838573   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:54:29.841478   31205 utils.go:231] Node "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf". Ready: true. Unschedulable: true
I0906 15:54:29.841501   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:54:29.841511   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:54:29.841518   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:54:29.841527   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:54:29.841539   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:54:29.844317   31205 utils.go:87] Cluster size is 6 nodes
I0906 15:54:34.835446   31205 utils.go:239] [remaining 14m45s] Cluster size expected to be 5 nodes
I0906 15:54:34.838475   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:54:34.838497   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:54:34.838503   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:54:34.838508   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:54:34.841381   31205 utils.go:231] Node "2681b5eb-5ab3-41f4-ae2a-df4454b1e4bf". Ready: true. Unschedulable: true
I0906 15:54:34.841401   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:54:34.841407   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:54:34.841412   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:54:34.841417   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:54:34.841422   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:54:34.844205   31205 utils.go:87] Cluster size is 6 nodes
I0906 15:54:39.835391   31205 utils.go:239] [remaining 14m40s] Cluster size expected to be 5 nodes
I0906 15:54:39.838613   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:54:39.838635   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:54:39.838642   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:54:39.838647   31205 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:54:39.841465   31205 utils.go:231] Node "dbe55a04-5eb1-42df-ae80-846f79da969a". Ready: true. Unschedulable: false
I0906 15:54:39.841485   31205 utils.go:231] Node "de17a193-07be-4165-bf27-a3510a938d6e". Ready: true. Unschedulable: false
I0906 15:54:39.841491   31205 utils.go:231] Node "f6279809-ed73-41b2-8127-87cfb777a67a". Ready: true. Unschedulable: false
I0906 15:54:39.841496   31205 utils.go:231] Node "fb6245e1-f356-4fab-9152-f02e39073964". Ready: true. Unschedulable: false
I0906 15:54:39.841501   31205 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:54:39.847040   31205 utils.go:87] Cluster size is 5 nodes
I0906 15:54:39.847069   31205 utils.go:257] waiting for all nodes to be ready
I0906 15:54:39.850004   31205 utils.go:262] waiting for all nodes to be schedulable
I0906 15:54:39.852769   31205 utils.go:290] [remaining 1m0s] Node "dbe55a04-5eb1-42df-ae80-846f79da969a" is schedulable
I0906 15:54:39.852798   31205 utils.go:290] [remaining 1m0s] Node "de17a193-07be-4165-bf27-a3510a938d6e" is schedulable
I0906 15:54:39.852810   31205 utils.go:290] [remaining 1m0s] Node "f6279809-ed73-41b2-8127-87cfb777a67a" is schedulable
I0906 15:54:39.852820   31205 utils.go:290] [remaining 1m0s] Node "fb6245e1-f356-4fab-9152-f02e39073964" is schedulable
I0906 15:54:39.852830   31205 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:54:39.852843   31205 utils.go:267] waiting for each node to be backed by a machine
I0906 15:54:39.860864   31205 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:54:39.860900   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-c54hf" is linked to node "dbe55a04-5eb1-42df-ae80-846f79da969a"
I0906 15:54:39.860916   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-2kj9n" is linked to node "f6279809-ed73-41b2-8127-87cfb777a67a"
I0906 15:54:39.860929   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-h4t9b" is linked to node "de17a193-07be-4165-bf27-a3510a938d6e"
I0906 15:54:39.860944   31205 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-ff79d" is linked to node "fb6245e1-f356-4fab-9152-f02e39073964"
I0906 15:54:39.860956   31205 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
I0906 15:54:39.871218   31205 utils.go:378] [15m0s remaining] Found 0 nodes with the map[node-role.kubernetes.io/worker: node-draining-test:29f771af-d0be-11e9-953e-0a2b6dea9fc2] label, as expected

• [SLOW TEST:85.202 seconds]
[Feature:Machines] Managed cluster should
/data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
  drain node before removing machine resource
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
------------------------------
[Feature:Machines] Managed cluster should 
  reject invalid machinesets
  /data/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:487
I0906 15:54:39.871336   31205 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Creating invalid machineset
STEP: Waiting for ReconcileError MachineSet event
I0906 15:54:39.948426   31205 infra.go:506] Fetching ReconcileError MachineSet invalid-machineset event
I0906 15:54:39.948474   31205 infra.go:512] Found ReconcileError event for "invalid-machineset" machine set with the following message: "invalid-machineset" machineset validation failed: spec.template.metadata.labels: Invalid value: map[string]string{"big-kitty":"i-am-bit-kitty"}: `selector` does not match template `labels`
STEP: Verify no machine from "invalid-machineset" machineset were created
I0906 15:54:39.951489   31205 infra.go:528] Have 0 machines generated from "invalid-machineset" machineset
STEP: Deleting invalid machineset
•
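For context, the "invalid machineset" exercised here is simply one whose spec.selector does not match spec.template.metadata.labels, which is exactly what the ReconcileError message above reports. A minimal sketch of that shape, assuming the machine.openshift.io/v1beta1 API (an assumption; the apiVersion is not shown in this log), with labels mirroring the log rather than the suite's actual fixture:

cat <<'EOF' | kubectl apply -f -
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  name: invalid-machineset
spec:
  replicas: 1
  selector:
    matchLabels:
      foo: bar                      # selector that the template labels below do not satisfy
  template:
    metadata:
      labels:
        big-kitty: i-am-bit-kitty   # mismatch is what triggers the ReconcileError event
    spec: {}
EOF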
Ran 7 of 16 Specs in 202.157 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (202.16s)
PASS
ok  	github.com/openshift/cluster-api-actuator-pkg/pkg/e2e	202.203s
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 04m 22s] ##########
[PostBuildScript] - Executing post build scripts.
[workspace] $ /bin/bash /tmp/jenkins4543774126737117882.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/gathered
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/gathered
/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/gathered

0 directories, 0 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins1104027302954288158.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/generated
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
  WARNING: You're not using the default seccomp profile
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/generated
/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
└── pid1.journal

0 directories, 8 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins1219475881723780557.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/journals
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/journals
/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service

0 directories, 3 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins8317497395116082701.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/builds/41/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r 'artifacts/gathered/*' gcs/artifacts/
cp: cannot stat ‘artifacts/gathered/*’: No such file or directory
++ export status=FAILURE
++ status=FAILURE
+ exit 0
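The cp failure above is what sets status=FAILURE just before the trap exits 0: the gathered directory is empty, so the glob never expands and cp is handed the literal 'artifacts/gathered/*'. A hedged sketch of one way to guard that copy so an empty directory is skipped cleanly (illustrative only, not the job's actual script):

if compgen -G 'artifacts/gathered/*' > /dev/null; then
    cp -r artifacts/gathered/* gcs/artifacts/
else
    echo 'no gathered artifacts to copy, skipping'   # an empty dir is expected when nothing was gathered
fi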
[workspace] $ /bin/bash /tmp/jenkins6009427369200317087.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.1lvabC4ORu
+ cat
+ chmod +x /tmp/tmp.1lvabC4ORu
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.1lvabC4ORu openshiftdevel:/tmp/tmp.1lvabC4ORu
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.1lvabC4ORu"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"1169998239923965952","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}} ]]
++ jq --compact-output '.buildid |= "41"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"41","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}'
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-kubernetes-autoscaler-master-e2e","buildid":"41","prowjobid":"44835668-d0bc-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"kubernetes-autoscaler","repo_link":"https://github.com/openshift/kubernetes-autoscaler","base_ref":"master","base_sha":"18a08df116691fed1236f4e53a67614dbc85b1fb","base_link":"https://github.com/openshift/kubernetes-autoscaler/commit/18a08df116691fed1236f4e53a67614dbc85b1fb","pulls":[{"number":116,"author":"frobware","sha":"470bd635e18fe2399da3e23cf71c3d649266c164","link":"https://github.com/openshift/kubernetes-autoscaler/pull/116","commit_link":"https://github.com/openshift/kubernetes-autoscaler/pull/116/commits/470bd635e18fe2399da3e23cf71c3d649266c164","author_link":"https://github.com/frobware"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin '/data/gcs/*'
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
a073c86ecf9e: Already exists
cc3fc741b1a9: Already exists
822bed51ba40: Pulling fs layer
85cea451eec0: Pulling fs layer
85cea451eec0: Verifying Checksum
85cea451eec0: Download complete
822bed51ba40: Verifying Checksum
822bed51ba40: Download complete
822bed51ba40: Pull complete
85cea451eec0: Pull complete
Digest: sha256:03aad50d7ec631ee07c12ac2ba679bd48c7781f7d5754f9e0dcc4e7260e35208
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","file":"prow/gcsupload/run.go:107","func":"k8s.io/test-infra/prow/gcsupload.Options.assembleTargets","level":"warning","msg":"Encountered error in resolving items to upload for /data/gcs/*: stat /data/gcs/*: no such file or directory","time":"2019-09-06T15:55:01Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:55:01Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:55:01Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:55:01Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_kubernetes-autoscaler/116/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:55:02Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:55:02Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-kubernetes-autoscaler-master-e2e/41.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:55:02Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-06T15:55:02Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
[workspace] $ /bin/bash /tmp/jenkins47189649540297075.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config
+ oct deprovision

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:55:03.401697", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:55:03.406036", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2019-09-06 11:55:04.224685", 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:55:04.911253", 
    "msg": "Tags {'Name': 'oct-terminate'} created for resource i-03f1b8259c215731f."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:55:05.810097", 
    "instance_ids": [
        "i-03f1b8259c215731f"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-03eae6f84a19d5d4a"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0afc8078905a83f21"
                }
            }, 
            "dns_name": "ec2-18-215-182-23.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-03f1b8259c215731f", 
            "image_id": "ami-0b77b87a37c3e662c", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2019-09-06T15:38:33.000Z", 
            "placement": "us-east-1c", 
            "private_dns_name": "ip-172-18-17-249.ec2.internal", 
            "private_ip": "172.18.17.249", 
            "public_dns_name": "ec2-18-215-182-23.compute-1.amazonaws.com", 
            "public_ip": "18.215.182.23", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "oct-terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:55:06.049339", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.17.249.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] ********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2019-09-06 11:55:06.549505", 
    "path": "/var/lib/jenkins/jobs/pull-ci-openshift-kubernetes-autoscaler-master-e2e/workspace/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
Recording test results
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[WS-CLEANUP] done
Finished: SUCCESS