Started by user OpenShift CI Robot
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[workspace] $ /bin/bash /tmp/jenkins2572460924046027041.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ touch /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ cp /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate
+ cat
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 02s] ##########
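Note: every stage in this log repeats the same bootstrap before doing its work: source the pinned origin-ci-tool virtualenv, which also points OCT_CONFIG_HOME at the workspace, then drive oct. A minimal sketch of that pattern, using only commands that appear in this log (substitute your own workspace path):

  WORKSPACE=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace
  source "$WORKSPACE/activate"   # exports VIRTUAL_ENV, PATH and OCT_CONFIG_HOME, as traced above
  oct configure ansible-client verbosity 2
  oct configure aws-client keypair_name libra
  oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem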
[workspace] $ /bin/bash /tmp/jenkins8935380932476770538.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
+ oct provision remote all-in-one --os rhel --stage base --provider aws --discrete-ssh-config --name pull-ci-openshift-cluster-api-provider-kubemark-master-e2e_54

PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml

PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.815244", "item": "origin_ci_inventory_dir", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_keypair_name) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.817634", "item": "origin_ci_aws_keypair_name", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_private_key_path) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.820705", "item": "origin_ci_aws_private_key_path", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_region) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.825185", "item": "origin_ci_aws_region", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_ami_tags) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.828252", "item": "origin_ci_aws_ami_tags", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_instance_name) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.831307", "item": "origin_ci_aws_instance_name", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_instance_type) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.834361", "item": "origin_ci_aws_master_instance_type", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.837394", "item": "origin_ci_aws_identifying_tag_key", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_hostname) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.840472", "item": "origin_ci_aws_hostname", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_ssh_config_strategy) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.844685", "item": "origin_ci_ssh_config_strategy", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=openshift_schedulable) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.847726", "item": "openshift_schedulable", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=openshift_node_labels) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.850947", "item": "openshift_node_labels", "skip_reason": "Conditional check failed", "skipped": true }

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:27
skipping: [localhost] => (item=origin_ci_aws_master_subnet) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.884465", "item": "origin_ci_aws_master_subnet", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.889170", "item": "origin_ci_aws_etcd_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_node_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.893249", "item": "origin_ci_aws_node_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.898627", "item": "origin_ci_aws_master_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.902818", "item": "origin_ci_aws_master_external_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.908242", "item": "origin_ci_aws_master_internal_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_router_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.912453", "item": "origin_ci_aws_router_security_group", "skip_reason": "Conditional check failed", "skipped": true }
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group) => { "changed": false, "generated_timestamp": "2019-08-05 08:02:29.918387", "item": "origin_ci_aws_router_elb_security_group", "skip_reason": "Conditional check failed", "skipped": true }

PLAY [provision an AWS EC2 instance] *******************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:30.733970", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => { "changed": true, "checksum": "18aaee00994df38cc3a63b635893175235331a9c", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/nested_group_mappings", "generated_timestamp": "2019-08-05 08:02:31.196410", "gid": 995, "group": "jenkins", "md5sum": "b30c3226ea63efa3ff9c5e346c14a16e", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 93, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006550.98-99976766586371/source", "state": "file", "uid": 997 }

TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:02:31.366633", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:02:31.529388", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => { "changed": true, "checksum": "4c06ba508f055c20f13426e8587342e8765a7b66", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml", "generated_timestamp": "2019-08-05 08:02:31.820068", "gid": 995, "group": "jenkins", "md5sum": "8aec71c75f7d512b278ae7c6f2959b12", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 331, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006551.68-152393965144781/source", "state": "file", "uid": 997 }

TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => { "changed": true, "cmd": [ "curl", "-s", "http://instance-data.ec2.internal" ], "delta": "0:00:00.015721", "end": "2019-08-05 08:02:32.053742", "failed": false, "failed_when_result": false, "generated_timestamp": "2019-08-05 08:02:32.070028", "rc": 0, "start": "2019-08-05 08:02:32.038021", "stderr": [], "stdout": [ "1.0", "2007-01-19", "2007-03-01", "2007-08-29", "2007-10-10", "2007-12-15", "2008-02-01", "2008-09-01", "2009-04-04", "2011-01-01", "2011-05-01", "2012-01-12", "2014-02-25", "2014-11-05", "2015-10-20", "2016-04-19", "2016-06-30", "2016-09-02", "2018-03-28", "2018-08-17", "2018-09-24", "latest" ], "warnings": [ "Consider using get_url or uri module rather than running curl" ] }
[WARNING]: Consider using get_url or uri module rather than running curl

TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_destination_variable": "private_dns_name", "origin_ci_aws_host_address_variable": "private_ip", "origin_ci_aws_vpc_destination_variable": "private_ip_address" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:32.109787" }

TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:32.142701" }

TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:02:32.308611", "gid": 995, "group": "jenkins", "mode": "0755", "owner": "jenkins", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache", "secontext": "system_u:object_r:var_lib_t:s0", "size": 6, "state": "directory", "uid": 997 }

TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => { "changed": true, "checksum": "625b8af723189db3b96ba0026d0f997a0025bc47", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.py", "generated_timestamp": "2019-08-05 08:02:32.600210", "gid": 995, "group": "jenkins", "md5sum": "cac06c14065dac74904232b89d4ba24c", "mode": "0755", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 63725, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006552.46-39372511318074/source", "state": "file", "uid": 997 }

TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => { "changed": true, "checksum": "e09be30038ede44bc3da57c7bd504b96abe6ee0a", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.ini", "generated_timestamp": "2019-08-05 08:02:32.892013", "gid": 995, "group": "jenkins", "md5sum": "458598cc5cdc5a0bc6a461229956ab5f", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 438, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006552.64-50460603858971/source", "state": "file", "uid": 997 }

TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => { "changed": true, "checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/tag_to_group_mappings", "generated_timestamp": "2019-08-05 08:02:33.185804", "gid": 995, "group": "jenkins", "md5sum": "bc3a567a1b6f342e1005182efc1b66be", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 287, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006553.05-131298656267061/source", "state": "file", "uid": 997 }

TASK [aws-up : list available AMIs] ********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:40
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:35.866073", "results": [ { "ami_id": "ami-04f9b88b6b0571f20", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-0655d2d962c590c8c", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d86ae865b17f4def", "volume_type": "gp2" } }, "creationDate": "2018-06-26T12:22:31.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_758", "name": "ami_build_origin_int_rhel_base_758", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_758", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" }, { "ami_id": "ami-0b77b87a37c3e662c", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-02ec23d4818f2747e", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d8726e441d4ca329", "volume_type": "gp2" } }, "creationDate": "2018-06-26T22:18:53.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_760", "name": "ami_build_origin_int_rhel_base_760", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_760", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } ] }

TASK [aws-up : choose appropriate AMIs for use] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:49
ok: [localhost] => (item={u'ami_id': u'ami-04f9b88b6b0571f20', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_758', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d86ae865b17f4def', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0655d2d962c590c8c', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_758', u'is_public': False, u'creationDate': u'2018-06-26T12:22:31.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_758'}) => { "ansible_facts": { "origin_ci_aws_ami_id_candidate": "ami-04f9b88b6b0571f20" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:35.911010", "item": { "ami_id": "ami-04f9b88b6b0571f20", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-0655d2d962c590c8c", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d86ae865b17f4def", "volume_type": "gp2" } }, "creationDate": "2018-06-26T12:22:31.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_758", "name": "ami_build_origin_int_rhel_base_758", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_758", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } }
ok: [localhost] => (item={u'ami_id': u'ami-0b77b87a37c3e662c', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_760', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d8726e441d4ca329', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-02ec23d4818f2747e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_760', u'is_public': False, u'creationDate': u'2018-06-26T22:18:53.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_760'}) => { "ansible_facts": { "origin_ci_aws_ami_id_candidate": "ami-0b77b87a37c3e662c" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:35.917987", "item": { "ami_id": "ami-0b77b87a37c3e662c", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "encrypted": false, "size": 75, "snapshot_id": "snap-02ec23d4818f2747e", "volume_type": "gp2" }, "/dev/sdb": { "delete_on_termination": true, "encrypted": false, "size": 50, "snapshot_id": "snap-0d8726e441d4ca329", "volume_type": "gp2" } }, "creationDate": "2018-06-26T22:18:53.000Z", "description": "OpenShift Origin development AMI on rhel at the base stage.", "hypervisor": "xen", "is_public": false, "location": "531415883065/ami_build_origin_int_rhel_base_760", "name": "ami_build_origin_int_rhel_base_760", "owner_id": "531415883065", "platform": null, "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "available", "tags": { "Name": "ami_build_origin_int_rhel_base_760", "image_stage": "base", "operating_system": "rhel", "ready": "yes" }, "virtualization_type": "hvm" } }

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:55
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_ami_id": "ami-0b77b87a37c3e662c" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:35.953038" }

TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:60
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:36.886574", "subnets": [ { "availability_zone": "us-east-1c", "available_ip_address_count": 4070, "cidr_block": "172.18.16.0/20", "default_for_az": "false", "id": "subnet-8bdb5ac2", "map_public_ip_on_launch": "true", "state": "available", "tags": { "Name": "devenv-subnet-2", "origin_ci_aws_cluster_component": "master_subnet" }, "vpc_id": "vpc-69705d0c" }, { "availability_zone": "us-east-1d", "available_ip_address_count": 4050, "cidr_block": "172.18.0.0/20", "default_for_az": "false", "id": "subnet-cf57c596", "map_public_ip_on_launch": "true", "state": "available", "tags": { "Name": "devenv-subnet-1", "origin_ci_aws_cluster_component": "master_subnet" }, "vpc_id": "vpc-69705d0c" } ] }

TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:67
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_master_subnet_ids": [ "subnet-8bdb5ac2", "subnet-cf57c596" ] }, "changed": false, "generated_timestamp": "2019-08-05 08:02:36.928490" }

TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:72
ok: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:37.969947", "security_groups": [ { "description": "default VPC security group", "group_id": "sg-7e73221a", "group_name": "default", "ip_permissions": [ { "ip_protocol": "-1", "ip_ranges": [], "ipv6_ranges": [], "prefix_list_ids": [], "user_id_group_pairs": [ { "group_id": "sg-7e73221a", "user_id": "531415883065" } ] }, { "from_port": 53, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "119.254.120.64/26" }, { "cidr_ip": "209.132.176.0/20" }, { "cidr_ip": "209.132.186.34/32" }, { "cidr_ip": "213.175.37.10/32" }, { "cidr_ip": "213.175.37.9/32" }, { "cidr_ip": "38.140.108.0/24" }, { "cidr_ip": "38.99.12.232/29" }, { "cidr_ip": "4.14.33.72/30" }, { "cidr_ip": "4.14.35.88/29" }, { "cidr_ip": "50.227.40.96/29" }, { "cidr_ip": "62.40.79.66/32" }, { "cidr_ip": "66.187.224.0/20" }, { "cidr_ip": "66.187.239.0/24" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 8444, "user_id_group_pairs": [] }, { "from_port": 80, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "107.20.219.35/32" }, { "cidr_ip": "108.166.48.153/32" }, { "cidr_ip": "212.199.177.64/27" }, { "cidr_ip": "212.72.208.162/32" }, { "cidr_ip": "54.241.19.245/32" }, { "cidr_ip": "97.65.119.184/29" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 443, "user_id_group_pairs": [] }, { "from_port": 22, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 22, "user_id_group_pairs": [] }, { "from_port": 53, "ip_protocol": "udp", "ip_ranges": [ { "cidr_ip": "209.132.176.0/20" }, { "cidr_ip": "66.187.224.0/20" }, { "cidr_ip": "66.187.239.0/24" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 53, "user_id_group_pairs": [] }, { "from_port": 0, "ip_protocol": "udp", "ip_ranges": [], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 65535, "user_id_group_pairs": [ { "group_id": "sg-0d1a5377", "user_id": "531415883065" }, { "group_id": "sg-5875023f", "user_id": "531415883065" }, { "group_id": "sg-7e73221a", "user_id": "531415883065" }, { "group_id": "sg-e1760186", "user_id": "531415883065" } ] }, { "from_port": 3389, "ip_protocol": "tcp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": 3389, "user_id_group_pairs": [] }, { "from_port": -1, "ip_protocol": "icmp", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "to_port": -1, "user_id_group_pairs": [] } ], "ip_permissions_egress": [ { "ip_protocol": "-1", "ip_ranges": [ { "cidr_ip": "0.0.0.0/0" } ], "ipv6_ranges": [], "prefix_list_ids": [], "user_id_group_pairs": [] } ], "owner_id": "531415883065", "tags": { "Name": "devenv-vpc", "openshift_infra": "true", "origin_ci_aws_cluster_component": "master_security_group" }, "vpc_id": "vpc-69705d0c" } ] }

TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:79
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_master_security_group_ids": [ "sg-7e73221a" ] }, "changed": false, "generated_timestamp": "2019-08-05 08:02:38.039129" }

TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:84
changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:02:55.992832", "instance_ids": [ "i-0af26858e335b2afe" ], "instances": [ { "ami_launch_index": "0", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-0d4a41b559474cec4" }, "/dev/sdb": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-0d4f042e0d382af62" } }, "dns_name": "ec2-34-229-130-184.compute-1.amazonaws.com", "ebs_optimized": false, "groups": { "sg-7e73221a": "default" }, "hypervisor": "xen", "id": "i-0af26858e335b2afe", "image_id": "ami-0b77b87a37c3e662c", "instance_type": "m4.xlarge", "kernel": null, "key_name": "libra", "launch_time": "2019-08-05T12:02:40.000Z", "placement": "us-east-1c", "private_dns_name": "ip-172-18-31-47.ec2.internal", "private_ip": "172.18.31.47", "public_dns_name": "ec2-34-229-130-184.compute-1.amazonaws.com", "public_ip": "34.229.130.184", "ramdisk": null, "region": "us-east-1", "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "running", "state_code": 16, "tags": { "Name": "pull-ci-openshift-cluster-api-provider-kubemark-master-e2e_54", "openshift_etcd": "", "openshift_master": "", "openshift_node": "" }, "tenancy": "default", "virtualization_type": "hvm" } ], "tagged_instances": [] }

TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:110
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_host": "172.18.31.47" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:56.030945" }

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:114
skipping: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:56.062062", "skip_reason": "Conditional check failed", "skipped": true }

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:119
ok: [localhost] => { "ansible_facts": { "origin_ci_aws_ssh_user": "origin" }, "changed": false, "generated_timestamp": "2019-08-05 08:02:56.097083" }

TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:124
changed: [localhost] => { "changed": true, "checksum": "85b085dae89b9a2e02f497f1abe239707ff85894", "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.31.47.yml", "generated_timestamp": "2019-08-05 08:02:56.394569", "gid": 995, "group": "jenkins", "md5sum": "35a0c052dba8df09e0acaf700398d573", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 776, "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1565006576.26-153508547586076/source", "state": "file", "uid": 997 }

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:141
ok: [localhost] => { "ansible_facts": { "origin_ci_ssh_config_files": [ "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config" ] }, "changed": false, "generated_timestamp": "2019-08-05 08:02:56.433936" }

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:146
skipping: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:02:56.466609", "skip_reason": "Conditional check failed", "skipped": true }

TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:151
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => { "changed": true, "dest": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "generated_timestamp": "2019-08-05 08:02:56.633570", "gid": 995, "group": "jenkins", "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "mode": "0644", "owner": "jenkins", "secontext": "system_u:object_r:var_lib_t:s0", "size": 0, "state": "file", "uid": 997 }

TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:157
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => { "changed": true, "generated_timestamp": "2019-08-05 08:02:56.888223", "item": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config", "msg": "Block inserted" }

TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:175
ok: [localhost] => { "changed": false, "elapsed": 63, "generated_timestamp": "2019-08-05 08:04:00.217423", "path": null, "port": 22, "search_regex": null, "state": "started" }

PLAY RECAP *********************************************************************
localhost : ok=28 changed=13 unreachable=0 failed=0

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 01m 31s] ##########
[workspace] $ /bin/bash /tmp/jenkins2593600171339987910.sh
########## STARTING STAGE: FORWARD GCS CREDENTIALS TO REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config
+ (( i = 0 ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ break
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD GCS CREDENTIALS TO REMOTE HOST [00h 00m 02s] ##########
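Note: the single scp trace above runs inside a bounded retry loop ((( i = 0 )), (( i < 10 ))); only the first, successful attempt appears in the log. A sketch of that pattern as reconstructed from the trace (the back-off sleep is an assumption, not shown in the log):

  for (( i = 0; i < 10; i++ )); do
    # copy the GCS publisher credentials onto the remote host; stop on first success
    scp -F "$SSH_CONFIG" /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json \
        openshiftdevel:/data/credentials.json && break
    sleep 5  # assumed back-off; the log only shows the happy path
  done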
'\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"1158347496229965824","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''buildId='\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_ID=1158347496229965824'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_OWNER=openshift'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_NAME=cluster-api-provider-kubemark'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_REF=master'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_SHA=58acd13e6caa24f1f47942e2628a692fb5a8fdeb'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_REFS=master:58acd13e6caa24f1f47942e2628a692fb5a8fdeb,27:e43c339c9c23beec199855e697eed29abdd194f0'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_NUMBER=27'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_PULL_SHA=e43c339c9c23beec199855e697eed29abdd194f0'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo 
'\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"1158347496229965824","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=54'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''CLONEREFS_ARGS='\'' >> /etc/environment' + set +o xtrace ########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 04s] ########## [workspace] $ /bin/bash /tmp/jenkins8697514092738610700.sh ########## STARTING STAGE: SYNC REPOSITORIES ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.uFAB6iXeH9 + cat + chmod +x /tmp/tmp.uFAB6iXeH9 + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.uFAB6iXeH9 openshiftdevel:/tmp/tmp.uFAB6iXeH9 + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.uFAB6iXeH9"' + cd /home/origin ++ jq --compact-output '.buildid |= "54"' + 
JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"54","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}' + for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\''' + (( i = 0 )) + (( i < 5 )) + docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ... latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs 1160f4abea84: Pulling fs layer be60dbe7622d: Pulling fs layer d26b76701841: Pulling fs layer b81d9d08eb39: Pulling fs layer 3a00cbb24bdb: Pulling fs layer b81d9d08eb39: Waiting 3a00cbb24bdb: Waiting be60dbe7622d: Verifying Checksum be60dbe7622d: Download complete 1160f4abea84: Verifying Checksum 1160f4abea84: Download complete 3a00cbb24bdb: Verifying Checksum 3a00cbb24bdb: Download complete b81d9d08eb39: Verifying Checksum b81d9d08eb39: Download complete d26b76701841: Verifying Checksum d26b76701841: Download complete 1160f4abea84: Pull complete be60dbe7622d: Pull complete d26b76701841: Pull complete b81d9d08eb39: Pull complete 3a00cbb24bdb: Pull complete Digest: sha256:636da43e513d52bc47d71502aaa9028804e8e320d6d31b626fa781b0bb3ffbd7 Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest + break + for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\''' + (( i = 0 )) + (( i < 5 )) + docker pull registry.svc.ci.openshift.org/ci/initupload:latest Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ... 
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
92d82c29d33c: Pulling fs layer
8e5b170ec95b: Pulling fs layer
8e5b170ec95b: Waiting
a073c86ecf9e: Verifying Checksum
a073c86ecf9e: Download complete
cc3fc741b1a9: Verifying Checksum
cc3fc741b1a9: Download complete
92d82c29d33c: Verifying Checksum
92d82c29d33c: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
a073c86ecf9e: Pull complete
cc3fc741b1a9: Pull complete
92d82c29d33c: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:32dde8315af72915139b3adfa32125f92d4a6e8c6943275ec72e7cfde3196a64
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
+ clonerefs_args='--repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master '
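Note: clonerefs takes one --repo flag per repository, and the repository under test encodes the PR as base_branch:base_sha,pull_number:pull_sha — the same PULL_REFS value forwarded to /etc/environment earlier. A sketch of how the invocation below is assembled (the variable plumbing is assumed; the flag values are taken from this log):

  PULL_REFS=master:58acd13e6caa24f1f47942e2628a692fb5a8fdeb,27:e43c339c9c23beec199855e697eed29abdd194f0
  # clone the PR merge plus the side repos into /data/src; clonerefs_args carries the extra --repo flags
  docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest \
      --src-root=/data --log=/data/clone.json \
      --repo=openshift,cluster-api-provider-kubemark="${PULL_REFS}" \
      ${clonerefs_args}  # unquoted on purpose: it expands into several --repo flags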
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,cluster-api-provider-kubemark=master:58acd13e6caa24f1f47942e2628a692fb5a8fdeb,27:e43c339c9c23beec199855e697eed29abdd194f0 --repo=openshift,machine-api-operator=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"","sha":"e43c339c9c23beec199855e697eed29abdd194f0"}]},"time":"2019-08-05T12:05:03Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"kubernetes-autoscaler","base_ref":"master"},"time":"2019-08-05T12:05:03Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","base_ref":"master"},"time":"2019-08-05T12:05:03Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"machine-api-operator","base_ref":"master"},"time":"2019-08-05T12:05:03Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-autoscaler-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"mkdir -p /data/src/github.com/openshift/kubernetes-autoscaler","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"mkdir -p /data/src/github.com/openshift/machine-api-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-kubemark/.git/\n","time":"2019-08-05T12:05:03Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-autoscaler-operator/.git/\n","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/kubernetes-autoscaler/.git/\n","time":"2019-08-05T12:05:03Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/machine-api-operator/.git/\n","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:03Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.1.0 -\u003e v0.1.0\n * [new tag] v0.2.0 -\u003e v0.2.0\n","time":"2019-08-05T12:05:07Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.0.0 -\u003e v0.0.0\n","time":"2019-08-05T12:05:08Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-08-05T12:05:09Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-08-05T12:05:09Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v1.0 -\u003e v1.0\n","time":"2019-08-05T12:05:09Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at fde49c53... Merge pull request #113 from bison/fix-target-update\n","time":"2019-08-05T12:05:09Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:09Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 1b52288... Merge pull request #367 from sadasu/metal3-images\n","time":"2019-08-05T12:05:09Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:09Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch master -\u003e FETCH_HEAD\n","time":"2019-08-05T12:05:09Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-08-05T12:05:10Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-08-05T12:05:10Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:10Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:10Z"}
{"command":"git checkout 58acd13e6caa24f1f47942e2628a692fb5a8fdeb","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out '58acd13e6caa24f1f47942e2628a692fb5a8fdeb'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 58acd13e... Merge pull request #26 from vikaschoudhary16/revendor-k8s-drain\n","time":"2019-08-05T12:05:11Z"}
{"command":"git branch --force master 58acd13e6caa24f1f47942e2628a692fb5a8fdeb","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:11Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-08-05T12:05:11Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git pull/27/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch refs/pull/27/head -\u003e FETCH_HEAD\n","time":"2019-08-05T12:05:12Z"}
{"command":"git merge --no-ff e43c339c9c23beec199855e697eed29abdd194f0","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Removing vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go\nMerge made by the 'recursive' strategy.\n Gopkg.lock | 4 +--\n vendor/github.com/openshift/cluster-api/Gopkg.lock | 9 ++++---\n vendor/github.com/openshift/cluster-api/Gopkg.toml | 5 ++--\n .../config/crds/machine_v1beta1_machine.yaml | 31 ++++++++++++++++++++++\n .../config/crds/machine_v1beta1_machineset.yaml | 21 +++++++++++++++\n .../pkg/apis/machine/v1beta1/machine_types.go | 7 +++++\n .../pkg/apis/machine/v1beta1/machineset_types.go | 5 ++++\n .../cluster-api/pkg/controller/add_node.go | 26 ------------------\n .../pkg/controller/machine/controller.go | 12 +++++++++\n .../pkg/controller/machinedeployment/controller.go | 7 +++++\n .../pkg/controller/machinedeployment/sync.go | 6 +++++\n .../pkg/controller/machineset/controller.go | 6 +++++\n 12 files changed, 105 insertions(+), 34 deletions(-)\n delete mode 100644 vendor/github.com/openshift/cluster-api/pkg/controller/add_node.go\n","time":"2019-08-05T12:05:12Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:13Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] addon-resizer-1.8.0 -\u003e addon-resizer-1.8.0\n * [new tag] addon-resizer-1.8.1 -\u003e addon-resizer-1.8.1\n * [new tag]
atomic-openshift-cluster-autoscaler-3.10.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.53.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.53.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.54.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.54.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.61.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.61.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.64.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.64.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.65.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.65.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.67.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.67.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.68.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.68.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-1666 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-1666\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.1-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.1-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.10.10-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.10-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.2-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.2-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.11.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.11.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.16.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.16.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.20.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.22.0 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.100-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.100-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.104-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.104-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.105-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.105-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.106-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.106-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.107-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.107-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.108-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.108-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.109-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.109-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.110-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.110-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.111-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.111-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.112-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.112-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.113-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.113-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.114-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.114-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.115-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.115-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.116-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.116-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.117-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.117-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.119-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.119-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.12-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.121-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.121-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.122-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.122-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.123-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.123-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.124-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.124-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.125-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.125-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.126-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.126-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.127-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.127-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.128-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.128-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.129-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.129-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.130-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.130-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.131-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.131-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.132-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.132-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.133-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.133-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.134-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.134-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.135-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.135-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.136-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.136-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.19-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.19-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.20-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.20-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.24-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.24-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.25-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.25-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.26-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.26-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.11.27-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.27-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.28-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.28-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.29-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.29-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.30-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.30-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.31-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.31-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.32-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.32-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.33-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.33-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.34-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.34-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.35-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.35-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.36-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.36-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.37-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.37-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.38-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.38-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.39-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.39-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.4-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.4-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.40-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.40-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.41-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.41-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.42-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.42-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.43-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.43-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.44-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.44-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.45-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.45-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.46-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.46-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.47-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.47-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.49-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.49-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.50-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.50-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.51-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.51-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.53-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.53-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.54-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.54-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.55-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.55-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.56-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.56-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.11.57-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.57-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.58-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.58-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.59-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.59-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.60-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.60-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.61-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.61-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.62-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.62-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.63-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.63-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.64-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.64-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.65-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.65-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.66-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.66-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.67-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.67-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.69-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.69-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.71-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.71-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.72-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.72-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.73-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.73-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.74-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.74-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.75-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.75-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.76-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.76-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.77-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.77-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.78-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.78-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.79-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.79-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.81-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.81-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.82-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.82-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.83-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.83-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.85-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.85-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.86-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.86-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.87-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.87-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.88-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.88-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.11.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.90-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.90-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.91-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.91-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.92-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.92-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.93-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.93-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.94-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.94-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.95-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.95-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.96-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.96-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.97-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.97-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.98-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.98-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.99-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.99-1\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.100.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.100.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.101.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.101.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.102.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.102.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.103.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.103.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.104.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.104.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.105.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.105.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.106.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.106.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.107.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.107.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.109.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.109.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.110.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.110.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.112.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.112.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.114.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.114.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.115.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.115.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.116.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.116.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.117.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.117.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.118.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.118.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.119.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.119.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.12.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.12.0\n 
* [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.122.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.122.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.123.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.123.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.124.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.124.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.125.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.125.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.128.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.128.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.130.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.130.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.131.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.131.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.132.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.132.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.136.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.136.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.137.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.137.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.138.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.138.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.139.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.139.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.140.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.140.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.141.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.141.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.142.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.142.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.143.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.143.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.144.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.144.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.145.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.145.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.146.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.146.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.147.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.147.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.148.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.148.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.149.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.149.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.20.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.29.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.29.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.31.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.31.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.33.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.33.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.36.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.36.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.4.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.4.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.45.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.45.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.48.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.48.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.49.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.49.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.50.0 -\u003e 
atomic-openshift-cluster-autoscaler-4.0.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.55.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.55.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.59.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.59.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.6.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.6.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.62.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.62.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.70.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.70.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.72.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.72.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.74.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.74.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.75.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.75.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.76.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.76.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.77.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.77.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.79.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.79.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.80.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.80.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.81.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.81.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.82.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.82.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.83.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.83.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.84.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.84.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.85.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.85.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.87.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.87.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.88.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.88.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.89.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.89.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.91.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.91.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.92.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.92.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.93.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.93.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.94.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.94.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.95.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.95.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.96.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.96.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.97.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.97.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.98.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.98.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.99.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.99.0\n * [new tag] cluster-autoscaler-0.5.2 -\u003e cluster-autoscaler-0.5.2\n * [new tag] cluster-autoscaler-0.5.3 -\u003e cluster-autoscaler-0.5.3\n * [new tag] cluster-autoscaler-0.5.4 -\u003e cluster-autoscaler-0.5.4\n * [new tag] cluster-autoscaler-0.6.0 -\u003e cluster-autoscaler-0.6.0\n * [new tag] cluster-autoscaler-0.6.1 -\u003e cluster-autoscaler-0.6.1\n * [new tag] cluster-autoscaler-0.6.2 -\u003e cluster-autoscaler-0.6.2\n * [new tag] cluster-autoscaler-0.6.3 -\u003e cluster-autoscaler-0.6.3\n * [new tag] cluster-autoscaler-0.6.4 -\u003e cluster-autoscaler-0.6.4\n * [new tag] cluster-autoscaler-1.0.0 -\u003e cluster-autoscaler-1.0.0\n * [new tag] cluster-autoscaler-1.0.1 -\u003e cluster-autoscaler-1.0.1\n * [new tag] cluster-autoscaler-1.0.2 -\u003e cluster-autoscaler-1.0.2\n * [new tag] cluster-autoscaler-1.0.3 -\u003e cluster-autoscaler-1.0.3\n * [new tag] cluster-autoscaler-1.0.4 -\u003e cluster-autoscaler-1.0.4\n * [new tag] cluster-autoscaler-1.0.5 -\u003e cluster-autoscaler-1.0.5\n * [new tag] cluster-autoscaler-1.1.0 -\u003e cluster-autoscaler-1.1.0\n * [new tag] cluster-autoscaler-1.1.1 -\u003e cluster-autoscaler-1.1.1\n * [new tag] cluster-autoscaler-1.1.2 -\u003e cluster-autoscaler-1.1.2\n * [new tag] cluster-autoscaler-1.2.0 -\u003e cluster-autoscaler-1.2.0\n * [new tag] cluster-autoscaler-1.2.1 -\u003e cluster-autoscaler-1.2.1\n * [new tag] cluster-autoscaler-1.2.2 -\u003e cluster-autoscaler-1.2.2\n * [new tag] v3.10.0 -\u003e v3.10.0\n * [new tag] v3.10.0-alpha.0 -\u003e v3.10.0-alpha.0\n * [new tag] v3.10.0-rc.0 -\u003e v3.10.0-rc.0\n * [new tag] v3.11 -\u003e v3.11\n * [new tag] v3.11.0 -\u003e v3.11.0\n * [new tag] v3.11.0-alpha.0 -\u003e v3.11.0-alpha.0\n * [new tag] vertical-pod-autoscaler-0.1 -\u003e vertical-pod-autoscaler-0.1\n","time":"2019-08-05T12:05:14Z"} {"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From 
https://github.com/openshift/kubernetes-autoscaler\n * branch master -\u003e FETCH_HEAD\n","time":"2019-08-05T12:05:14Z"} {"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 49fac1f86... Merge pull request #112 from frobware/simplify-maxnodestotal-event\n","time":"2019-08-05T12:05:16Z"} {"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:16Z"} {"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-08-05T12:05:17Z"} {"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-08-05T12:05:17Z"} {"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-08-05T12:05:17Z"} + docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"54","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} 
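The clonerefs records above boil down to a fixed git recipe: init the repo, fetch tags and the base branch, pin master to the base SHA from the job spec, then fetch the PR head and test the merge result rather than the raw head. A minimal bash sketch of that recipe, using the refs recorded in this log (base master@58acd13e, PR #27 head e43c339c); this is an illustration, not prow's actual Go implementation:
# Sketch only: re-creates the clonerefs checkout-and-merge flow logged above.
# SHAs and refs are taken from this log; everything else is standard git.
set -o errexit -o xtrace
REPO=https://github.com/openshift/cluster-api-provider-kubemark.git
BASE_SHA=58acd13e6caa24f1f47942e2628a692fb5a8fdeb
PULL_SHA=e43c339c9c23beec199855e697eed29abdd194f0
mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark
cd /data/src/github.com/openshift/cluster-api-provider-kubemark
git init
git config user.name ci-robot
git config user.email ci-robot@k8s.io
git fetch "$REPO" --tags --prune      # tags first, so builds can version themselves
git fetch "$REPO" master
git checkout "$BASE_SHA"              # pin to the exact base SHA from the job spec
git branch --force master "$BASE_SHA"
git checkout master
git fetch "$REPO" pull/27/head        # the PR under test
git merge --no-ff "$PULL_SHA"         # CI tests the merge commit, not the PR head alone
git submodule update --init --recursive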
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/started.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} {"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:05:20Z"} 
{"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-08-05T12:05:20Z"} + sudo chmod -R a+rwX /data + sudo chown -R origin:origin-git /data + set +o xtrace ########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 18s] ########## [workspace] $ /bin/bash /tmp/jenkins8573558204388846062.sh ########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=pull-ci-openshift-cluster-api-provider-kubemark-master-e2e'\'' >> /etc/environment' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=54'\'' >> /etc/environment' + set +o xtrace ########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ########## [workspace] $ /bin/bash /tmp/jenkins4228537654317744424.sh ########## STARTING STAGE: INSTALL MINIKUBE ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.ERof7fzuEf + cat + chmod +x /tmp/tmp.ERof7fzuEf + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.ERof7fzuEf openshiftdevel:/tmp/tmp.ERof7fzuEf + ssh -F 
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.ERof7fzuEf"' + cd /home/origin + curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64 [curl progress meter elided: 40.3M downloaded in ~1s at 33.6M/s] + chmod +x minikube + sudo mv minikube /usr/bin/ + curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl [curl progress meter elided: 51.7M downloaded in ~1s at 69.6M/s] + chmod +x kubectl + sudo mv kubectl /usr/bin/ + sudo yum install -y ebtables Loaded plugins: amazon-id, rhui-lb, search-disabled-repos Resolving Dependencies --> Running transaction check ---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed --> Finished Dependency Resolution Dependencies Resolved ================================================================================ Package Arch Version Repository Size ================================================================================ Installing: ebtables x86_64 2.0.10-16.el7 oso-rhui-rhel-server-releases 123 k Transaction Summary ================================================================================ Install 1 Package Total download size: 123 k Installed size: 343 k Downloading packages: Running transaction check Running transaction test Transaction test succeeded Running transaction Installing : ebtables-2.0.10-16.el7.x86_64 1/1 Verifying : ebtables-2.0.10-16.el7.x86_64 1/1 Installed: ebtables.x86_64 0:2.0.10-16.el7 Complete! + VERSION=v1.13.0 + wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz --2019-08-05 12:06:37-- https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz Resolving github.com (github.com)... 140.82.113.4 Connecting to github.com (github.com)|140.82.113.4|:443... connected. HTTP request sent, awaiting response...
+ sudo yum install -y ebtables
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Resolving Dependencies
--> Running transaction check
---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
 Package       Arch      Version           Repository                      Size
================================================================================
Installing:
 ebtables      x86_64    2.0.10-16.el7     oso-rhui-rhel-server-releases  123 k
Transaction Summary
================================================================================
Install  1 Package
Total download size: 123 k
Installed size: 343 k
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : ebtables-2.0.10-16.el7.x86_64   1/1
  Verifying  : ebtables-2.0.10-16.el7.x86_64   1/1
Installed:
  ebtables.x86_64 0:2.0.10-16.el7
Complete!
+ VERSION=v1.13.0
+ wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
--2019-08-05 12:06:37-- https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
Resolving github.com (github.com)... 140.82.113.4
Connecting to github.com (github.com)|140.82.113.4|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190805%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190805T120637Z&X-Amz-Expires=300&X-Amz-Signature=f4aae60f2a37931f473e39dc836e31660039cc15d5530c4869292ed1afd3e137&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream [following]
--2019-08-05 12:06:37-- [signed S3 URL above]
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.216.93.131
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.216.93.131|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10631149 (10M) [application/octet-stream]
Saving to: ‘crictl-v1.13.0-linux-amd64.tar.gz’
[wget progress dots elided]
2019-08-05 12:06:37 (93.3 MB/s) - ‘crictl-v1.13.0-linux-amd64.tar.gz’ saved [10631149/10631149]
+ sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin crictl
+ rm -f crictl-v1.13.0-linux-amd64.tar.gz
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 01m 13s] ##########
[workspace] $ /bin/bash /tmp/jenkins8899001969749211588.sh
########## STARTING STAGE: DEPLOY KUBERNETES ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.Cv21c3wxqn
+ cat
+ chmod +x /tmp/tmp.Cv21c3wxqn
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.Cv21c3wxqn openshiftdevel:/tmp/tmp.Cv21c3wxqn
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.Cv21c3wxqn"'
+ cd /home/origin
+ sudo setenforce 0
+ sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5
There is a newer version of minikube available (v1.2.0). Download it here: https://github.com/kubernetes/minikube/releases/tag/v1.2.0 To disable this notification, run the following: minikube config set WantUpdateNotification false
Starting local Kubernetes v1.12.0 cluster...
Starting VM...
Creating CA: /root/.minikube/certs/ca.pem
Creating client certificate: /root/.minikube/certs/cert.pem
Getting VM IP address...
Moving files into cluster...
Downloading kubeadm v1.12.0
Downloading kubelet v1.12.0
Finished Downloading kubeadm v1.12.0
Finished Downloading kubelet v1.12.0
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
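minikube reports success as soon as the kubeconfig is written, and the stage relies on that. A more defensive script (not part of this job) would also wait for the single none-driver node to report Ready before continuing:

# poll the apiserver until the node registers and reports Ready
# (the kubeconfig is root-owned under the none driver, hence sudo)
until sudo kubectl get nodes 2>/dev/null | grep -q ' Ready'; do
  sleep 5
done
sudo kubectl get nodes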
=================== WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks
When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory. You will need to move the files to the appropriate location and then set the correct permissions. An example of this is below:
sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true
Loading cached images from config file.
+ sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 01m 05s] ##########
[workspace] $ /bin/bash /tmp/jenkins1683972096015572475.sh
########## STARTING STAGE: INSTALL KUSTOMIZE ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.kju0llQmHg
+ cat
+ chmod +x /tmp/tmp.kju0llQmHg
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.kju0llQmHg openshiftdevel:/tmp/tmp.kju0llQmHg
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.kju0llQmHg"'
+ cd /home/origin
+ curl -Lo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.1.0/kustomize_2.1.0_linux_amd64
[curl progress meter elided; 22.9M downloaded, avg 40.9M/s]
+ chmod u+x kustomize
+ sudo mv kustomize /usr/bin/kustomize
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL KUSTOMIZE [00h 00m 01s] ##########
[workspace] $ /bin/bash /tmp/jenkins5896811254874749801.sh
########## STARTING STAGE: INSTALL IMAGEBUILDER ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.wKW5fuGdho
+ cat
+ chmod +x /tmp/tmp.wKW5fuGdho
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.wKW5fuGdho openshiftdevel:/tmp/tmp.wKW5fuGdho
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.wKW5fuGdho"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL IMAGEBUILDER [00h 00m 23s] ##########
[workspace] $ /bin/bash /tmp/jenkins2494585921880228106.sh
########## STARTING STAGE: BUILD KUBEMARK MACHINE CONTROLLERS ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.5odaItRih1
+ cat
+ chmod +x /tmp/tmp.5odaItRih1
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.5odaItRih1 openshiftdevel:/tmp/tmp.5odaItRih1
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.5odaItRih1"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ sudo make images IMAGE=docker.io/gofed/kubemark-machine-controllers VERSION=v1.0 NO_DOCKER=1
imagebuilder -t "docker.io/gofed/kubemark-machine-controllers:v1.0" -t "docker.io/gofed/kubemark-machine-controllers:latest" ./
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.10 was not found, pulling ...
[image layer pull progress elided]
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark
--> COPY . .
--> RUN go build -o ./machine-controller-manager ./cmd/manager
--> RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager
--> Image docker.io/gofed/base:baseci was not found, pulling ...
[image layer pull progress elided]
--> Extracting
--> FROM docker.io/gofed/base:baseci as 1
--> RUN INSTALL_PKGS=" openssh " && yum install -y $INSTALL_PKGS && rpm -V $INSTALL_PKGS && yum clean all && curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /bin/kubectl && curl -LO https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x ./jq-linux64 && mv ./jq-linux64 /bin/jq
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
 * base: mirror.ash.fastserv.com
 * extras: mirror.ash.fastserv.com
 * updates: mirror.ash.fastserv.com
Resolving Dependencies
--> Running transaction check
---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed
--> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64
--> Running transaction check
---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed
--> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64
--> Running transaction check
---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
 Package             Arch         Version              Repository         Size
================================================================================
Installing:
 openssh             x86_64       7.4p1-16.el7         base              510 k
Installing for dependencies:
 fipscheck           x86_64       1.4.1-6.el7          base               21 k
 fipscheck-lib       x86_64       1.4.1-6.el7          base               11 k
Transaction Summary
================================================================================
Install  1 Package (+2 Dependent packages)
Total download size: 542 k
Installed size: 2.0 M
Downloading packages:
--------------------------------------------------------------------------------
Total                                              3.6 MB/s | 542 kB  00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : fipscheck-1.4.1-6.el7.x86_64       1/3
  Installing : fipscheck-lib-1.4.1-6.el7.x86_64   2/3
  Installing : openssh-7.4p1-16.el7.x86_64        3/3
  Verifying  : fipscheck-lib-1.4.1-6.el7.x86_64   1/3
  Verifying  : fipscheck-1.4.1-6.el7.x86_64       2/3
  Verifying  : openssh-7.4p1-16.el7.x86_64        3/3
Installed:
  openssh.x86_64 0:7.4p1-16.el7
Dependency Installed:
  fipscheck.x86_64 0:1.4.1-6.el7  fipscheck-lib.x86_64 0:1.4.1-6.el7
Complete!
Loaded plugins: fastestmirror, ovl
Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates
Cleaning up list of fastest mirrors
[curl progress meters for the kubectl (40.9M) and jq (2956k) downloads elided]
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager /
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager /
--> Committing changes to docker.io/gofed/kubemark-machine-controllers:v1.0 ...
--> Tagged as docker.io/gofed/kubemark-machine-controllers:latest
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD KUBEMARK MACHINE CONTROLLERS [00h 01m 33s] ##########
[workspace] $ /bin/bash /tmp/jenkins7460812305283908929.sh
########## STARTING STAGE: BUILD CLUSTER AUTOSCALER ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.WRYEuOB2Fo
+ cat
+ chmod +x /tmp/tmp.WRYEuOB2Fo
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.WRYEuOB2Fo openshiftdevel:/tmp/tmp.WRYEuOB2Fo
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.WRYEuOB2Fo"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/kubernetes-autoscaler
+ sudo imagebuilder -f images/cluster-autoscaler/Dockerfile -t docker.io/openshift/origin-cluster-autoscaler:v4.0 .
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ...
[image layer pull progress elided]
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/k8s.io/autoscaler
--> COPY . .
--> RUN go build -o cluster-autoscaler/cluster-autoscaler ./cluster-autoscaler
--> Image registry.svc.ci.openshift.org/openshift/origin-v4.0:base was not found, pulling ...
[image layer pull progress elided]
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /usr/bin/
--> CMD /usr/bin/cluster-autoscaler
--> LABEL summary="Cluster Autoscaler for OpenShift and Kubernetes"
--> Committing changes to docker.io/openshift/origin-cluster-autoscaler:v4.0 ...
--> Done
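Both images built so far follow the same two-stage layout: compile inside the golang builder image, then copy only the finished binaries onto the runtime base so the Go toolchain never ships. Schematically, with a hypothetical module path and image tag (the real Dockerfiles live in the respective repos):

cat > Dockerfile.example <<'EOF'
# stage 1: build the controller binary with the Go toolchain
FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 AS builder
WORKDIR /go/src/example.com/controller
COPY . .
RUN go build -o /tmp/controller ./cmd/manager

# stage 2: ship only the compiled artifact on the runtime base
FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base
COPY --from=builder /tmp/controller /usr/bin/controller
CMD ["/usr/bin/controller"]
EOF
imagebuilder -t example/controller:latest -f Dockerfile.example .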
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD CLUSTER AUTOSCALER [00h 02m 22s] ##########
[workspace] $ /bin/bash /tmp/jenkins1624454826683774104.sh
########## STARTING STAGE: DEPLOY MACHINE API OPERATOR ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.DbJ1fVfQmJ
+ cat
+ chmod +x /tmp/tmp.DbJ1fVfQmJ
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.DbJ1fVfQmJ openshiftdevel:/tmp/tmp.DbJ1fVfQmJ
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.DbJ1fVfQmJ"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/machine-api-operator
+ make build
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh machine-api-operator
Unable to find image 'golang:1.12' locally
Trying to pull repository registry.access.redhat.com/golang ...
Pulling repository registry.access.redhat.com/golang
Trying to pull repository docker.io/library/golang ...
1.12: Pulling from docker.io/library/golang
[docker layer pull progress elided]
Digest: sha256:f5486a917b57f8b14be4345604bc4654147416a327d6d63271a0c52c907001c4
Status: Downloaded newer image for docker.io/golang:1.12
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-479-g1b52288d)
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-479-g1b52288d)
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-479-g1b52288d)
+ sudo imagebuilder -t docker.io/openshift/origin-machine-api-operator:v4.0.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/machine-api-operator
--> COPY . .
--> RUN NO_DOCKER=1 make build
./hack/go-build.sh machine-api-operator
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-479-g1b52288d)
./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-479-g1b52288d)
./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-479-g1b52288d)
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-healthcheck .
--> LABEL io.openshift.release.operator true
--> Committing changes to docker.io/openshift/origin-machine-api-operator:v4.0.0 ...
--> Done
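The deploy targets that follow all funnel manifests through kustomize build | kubectl apply -f -. For kustomize v2.1.0 as installed earlier, the input is a directory containing a kustomization.yaml that enumerates plain manifests; a minimal sketch with hypothetical file names (the job's real inputs are the repo's own kustomizations):

cat > kustomization.yaml <<'EOF'
# kustomize concatenates these manifests into one applyable stream
resources:
- namespace.yaml
- rbac.yaml
- deployment.yaml
EOF
kustomize build . | kubectl apply -f -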
+ sudo make deploy-kubemark
kustomize build config | kubectl apply -f -
namespace "kubemark-actuator" created
serviceaccount "kubemark" created
clusterrole.rbac.authorization.k8s.io "kubemark-actuator-role" created
clusterrolebinding.rbac.authorization.k8s.io "kubemark-actuator-rolebinding" created
configmap "deleteunreadynodes" created
deployment.apps "machineapi-kubemark-controllers" created
kustomize build | kubectl apply -f -
namespace "openshift-machine-api" created
customresourcedefinition.apiextensions.k8s.io "clusteroperators.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "featuregates.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinedisruptionbudgets.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinehealthchecks.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machines.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinesets.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "prometheusrules.monitoring.coreos.com" created
customresourcedefinition.apiextensions.k8s.io "servicemonitors.monitoring.coreos.com" created
serviceaccount "machine-api-controllers" created
serviceaccount "machine-api-operator" created
role.rbac.authorization.k8s.io "machine-api-controllers" created
role.rbac.authorization.k8s.io "machine-api-operator" created
role.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrole.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrole.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
rolebinding.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-operator" created
configmap "machine-api-operator-images" created
service "machine-api-operator" created
deployment.apps "machine-api-operator" created
clusteroperator.config.openshift.io "machine-api" created
kubectl apply -f config/kubemark-config-infra.yaml
customresourcedefinition.apiextensions.k8s.io "infrastructures.config.openshift.io" created
infrastructure.config.openshift.io "cluster" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY MACHINE API OPERATOR [00h 03m 40s] ##########
[workspace] $ /bin/bash /tmp/jenkins5755636012808898388.sh
########## STARTING STAGE: DEPLOY CLUSTER AUTOSCALER OPERATOR ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.gwmXSiZ0Fe
+ cat
+ chmod +x /tmp/tmp.gwmXSiZ0Fe
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.gwmXSiZ0Fe openshiftdevel:/tmp/tmp.gwmXSiZ0Fe
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.gwmXSiZ0Fe"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-autoscaler-operator/
+ sudo imagebuilder -t quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator
--> COPY . .
--> ENV NO_DOCKER=1
--> ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator
--> RUN unset VERSION && make build
go build -ldflags "-X github.com/openshift/cluster-autoscaler-operator/pkg/version.Raw=v0.0.0-202-gfde49c5" -o "/go/bin/cluster-autoscaler-operator" "github.com/openshift/cluster-autoscaler-operator/cmd/manager"
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/
--> COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests
--> CMD ["/usr/bin/cluster-autoscaler-operator"]
--> LABEL io.openshift.release.operator true
--> Committing changes to quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 ...
--> Done
+ kustomize build
+ sudo kubectl apply -f -
customresourcedefinition.apiextensions.k8s.io "clusterautoscalers.autoscaling.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machineautoscalers.autoscaling.openshift.io" created
serviceaccount "cluster-autoscaler" created
serviceaccount "cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler" created
role.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
configmap "cluster-autoscaler-operator-ca" created
secret "cluster-autoscaler-operator-cert" created
service "cluster-autoscaler-operator" created
deployment.apps "cluster-autoscaler-operator" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER AUTOSCALER OPERATOR [00h 00m 45s] ##########
[workspace] $ /bin/bash /tmp/jenkins7087671254887836057.sh
########## STARTING STAGE: DEPLOY CLUSTER RESOURCES ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.k4bcmv8v7U
+ cat
+ chmod +x /tmp/tmp.k4bcmv8v7U
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.k4bcmv8v7U openshiftdevel:/tmp/tmp.k4bcmv8v7U
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.k4bcmv8v7U"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo kubectl apply -f examples/machine-set.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset" created
+ sudo kubectl apply -f examples/static-machine.yaml
machine.machine.openshift.io "minikube-static-machine" created
+ sudo kubectl apply -f examples/worker-machinesets.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-red" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-green" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-blue" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER RESOURCES [00h 00m 02s] ##########
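The applied examples/*.yaml files create the MachineSets the kubemark actuator will reconcile. Reconstructed from the names above, such a manifest has roughly this shape (hypothetical metadata.name, providerSpec elided; see examples/machine-set.yaml in the repo for the authoritative version):

cat <<'EOF' | sudo kubectl apply -f -
apiVersion: machine.openshift.io/v1beta1
kind: MachineSet
metadata:
  name: kubemark-machineset-example   # hypothetical name
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      machine.openshift.io/cluster-api-machineset: kubemark-machineset-example
  template:
    metadata:
      labels:
        machine.openshift.io/cluster-api-machineset: kubemark-machineset-example
    spec:
      providerSpec: {}                # kubemark-specific config elided
EOF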
[workspace] $ /bin/bash /tmp/jenkins2984087342585737974.sh
########## STARTING STAGE: INSTALL GO 1.10.1 ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.MeHqax0fiH
+ cat
+ chmod +x /tmp/tmp.MeHqax0fiH
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.MeHqax0fiH openshiftdevel:/tmp/tmp.MeHqax0fiH
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.MeHqax0fiH"'
+ cd /home/origin
+ mkdir -p /home/origin/bin
+ curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x /home/origin/bin/gimme
+ gimme 1.10.1
unset GOOS; unset GOARCH; export GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64'; export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}"; go version >&2; export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env"
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 07s] ##########
[workspace] $ /bin/bash /tmp/jenkins2118480288397065073.sh
########## STARTING STAGE: RUN E2E TESTS ##########
[virtualenv activation and environment exports elided]
++ mktemp
+ script=/tmp/tmp.VzZZKgDTyf
+ cat
+ chmod +x /tmp/tmp.VzZZKgDTyf
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.VzZZKgDTyf openshiftdevel:/tmp/tmp.VzZZKgDTyf
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.VzZZKgDTyf"'
+ cd /home/origin
+ set +x
go version go1.10.1 linux/amd64
go test -timeout 30m \
 -v ./vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \
 -kubeconfig ${KUBECONFIG:-~/.kube/config} \
 -machine-api-namespace ${NAMESPACE:-kube-system} \
 -ginkgo.v \
 -args -v 5 -logtostderr true
=== RUN TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1565007445
Will run 15 of 15 specs
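The suite is plain go test over the vendored cluster-api-actuator-pkg e2e package: -kubeconfig and -machine-api-namespace are flags of the test binary itself, and everything after -args is passed through to glog. For local debugging, a single spec can be isolated with ginkgo's standard focus flag (not used by this job):

go test -timeout 30m \
 -v ./vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e \
 -kubeconfig ~/.kube/config \
 -machine-api-namespace kube-system \
 -ginkgo.v -ginkgo.focus 'Autoscaler' \
 -args -v 5 -logtostderr true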
[Feature:Operators] Cluster autoscaler operator deployment should be available
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:12
I0805 12:17:25.632775 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
I0805 12:17:25.669649 4338 utils.go:58] Deployment "cluster-autoscaler-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Cluster autoscaler cluster operator status should be available
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:23
I0805 12:17:25.669728 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:MachineHealthCheck] MachineHealthCheck controller with node-unhealthy-conditions configmap should delete unhealthy machine
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:99
I0805 12:17:25.684967 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
I0805 12:17:25.700997 4338 machinehealthcheck.go:61] Skipping machine health checking test
I0805 12:17:25.701439 4338 machinehealthcheck.go:116] Skipping machine health checking test
S [SKIPPING] in Spec Setup (BeforeEach) [0.017 seconds]
[Feature:MachineHealthCheck] MachineHealthCheck controller
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:24
  with node-unhealthy-conditions configmap [BeforeEach]
  /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:83
    should delete unhealthy machine
    /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:99
    Skipping machine health checking test
    /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:62
------------------------------
[Feature:MachineHealthCheck] MachineHealthCheck controller should delete unhealthy machine
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:110
I0805 12:17:25.701962 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
I0805 12:17:25.710968 4338 machinehealthcheck.go:61] Skipping machine health checking test
I0805 12:17:25.711400 4338 machinehealthcheck.go:116] Skipping machine health checking test
S [SKIPPING] in Spec Setup (BeforeEach) [0.010 seconds]
[Feature:MachineHealthCheck] MachineHealthCheck controller
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:24
  should delete unhealthy machine [BeforeEach]
  /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:110
  Skipping machine health checking test
  /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/machinehealthcheck/machinehealthcheck.go:62
------------------------------
[Feature:Machines][Serial] Autoscaler should scale up and down
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:191
I0805 12:17:25.711915 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
I0805 12:17:25.714425 4338 framework.go:397] >>> kubeConfig: /root/.kube/config
STEP: Getting machinesets
STEP: Getting nodes
STEP: Creating 4 machineautoscalers
I0805 12:17:25.732739 4338 autoscaler.go:234] Create MachineAutoscaler backed by MachineSet kube-system/kubemark-actuator-testing-machineset - min:1, max:2
I0805 12:17:25.747148 4338 autoscaler.go:234] Create MachineAutoscaler backed by MachineSet kube-system/kubemark-actuator-testing-machineset-blue - min:1, max:2
I0805 12:17:25.750924 4338 autoscaler.go:234] Create MachineAutoscaler backed by MachineSet kube-system/kubemark-actuator-testing-machineset-green - min:1, max:2
I0805 12:17:25.754673 4338 autoscaler.go:234] Create MachineAutoscaler backed by MachineSet kube-system/kubemark-actuator-testing-machineset-red - min:1, max:2
STEP: Creating ClusterAutoscaler configured with maxNodesTotal:8
STEP: Creating scale-out workload
I0805 12:17:25.889769 4338 autoscaler.go:277] [10m0s remaining] Expecting 3 "ScaledUpGroup" events; observed 0
I0805 12:17:26.845401 4338 autoscaler.go:256] cluster-autoscaler: cluster-autoscaler-default-67874dcc6f-jghkq became leader
[three more "Expecting 3 "ScaledUpGroup" events; observed 0" polls at 12:17:28-12:17:34 elided]
I0805 12:17:37.045039 4338 autoscaler.go:256] cluster-autoscaler-status: Max total nodes in cluster reached: 8
I0805 12:17:37.048728 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: setting group kube-system/kubemark-actuator-testing-machineset-green size to 2
I0805 12:17:37.057727 4338 autoscaler.go:256] workload-6sxqt: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}]
I0805 12:17:37.065183 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: group kube-system/kubemark-actuator-testing-machineset-green size set to 2
[about 40 similar "pod triggered scale-up" events for machineset-green (12:17:37-12:17:44) elided, interleaved with "observed 1" progress polls]
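The four MachineAutoscalers (min:1, max:2 each) and the ClusterAutoscaler (maxNodesTotal:8) that the test creates correspond to resources of roughly this form (reconstructed from the logged parameters; the exact objects are built programmatically in the test):

cat <<'EOF' | kubectl apply -f -
apiVersion: autoscaling.openshift.io/v1
kind: ClusterAutoscaler
metadata:
  name: default
spec:
  resourceLimits:
    maxNodesTotal: 8   # matches "Max total nodes in cluster reached: 8"
---
apiVersion: autoscaling.openshift.io/v1beta1
kind: MachineAutoscaler
metadata:
  name: kubemark-actuator-testing-machineset-green  # one per machineset
  namespace: kube-system
spec:
  minReplicas: 1
  maxReplicas: 2
  scaleTargetRef:
    apiVersion: machine.openshift.io/v1beta1
    kind: MachineSet
    name: kubemark-actuator-testing-machineset-green
EOF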
[{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:37.890587 4338 autoscaler.go:277] [9m48s remaining] Expecting 3 "ScaledUpGroup" events; observed 1 I0805 12:17:38.046070 4338 autoscaler.go:256] workload-6drjz: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:38.245076 4338 autoscaler.go:256] workload-przng: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:38.447533 4338 autoscaler.go:256] workload-d9hp9: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:38.645213 4338 autoscaler.go:256] workload-2w7hq: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:38.845302 4338 autoscaler.go:256] workload-8kg4j: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:39.046132 4338 autoscaler.go:256] workload-t694m: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:39.245583 4338 autoscaler.go:256] workload-9vmz4: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:39.462147 4338 autoscaler.go:256] workload-fhdqb: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:39.645687 4338 autoscaler.go:256] workload-6b45p: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:39.845427 4338 autoscaler.go:256] workload-nb9j5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.045984 4338 autoscaler.go:256] workload-7grp5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.245256 4338 autoscaler.go:256] workload-lf82w: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.445223 4338 autoscaler.go:256] workload-hkzkx: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.645620 4338 autoscaler.go:256] workload-pvgkj: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.847439 4338 autoscaler.go:256] workload-2q9s8: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:40.890789 4338 autoscaler.go:277] [9m45s remaining] Expecting 3 "ScaledUpGroup" events; observed 1 I0805 12:17:41.045384 4338 autoscaler.go:256] workload-m5hbp: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:41.245144 4338 autoscaler.go:256] workload-v898x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:41.445951 4338 autoscaler.go:256] workload-smr8x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:41.645536 4338 autoscaler.go:256] workload-zdvmv: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:41.845490 4338 autoscaler.go:256] workload-jjnlk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:42.046557 4338 autoscaler.go:256] workload-w8g8k: pod triggered 
scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:42.245424 4338 autoscaler.go:256] workload-hdm7c: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:42.446656 4338 autoscaler.go:256] workload-wd968: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:42.645281 4338 autoscaler.go:256] workload-rztgk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:42.845423 4338 autoscaler.go:256] workload-jpttk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.045162 4338 autoscaler.go:256] workload-nx68x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.245177 4338 autoscaler.go:256] workload-2s89k: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.445239 4338 autoscaler.go:256] workload-xplw7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.645589 4338 autoscaler.go:256] workload-5fwc8: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.845380 4338 autoscaler.go:256] workload-t7mc7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:43.890922 4338 autoscaler.go:277] [9m42s remaining] Expecting 3 "ScaledUpGroup" events; observed 1 I0805 12:17:44.045587 4338 autoscaler.go:256] workload-sgm7d: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-green 1->2 (max: 2)}] I0805 12:17:46.891127 4338 autoscaler.go:277] [9m39s remaining] Expecting 3 "ScaledUpGroup" events; observed 1 I0805 12:17:47.090705 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: setting group kube-system/kubemark-actuator-testing-machineset-red size to 2 I0805 12:17:47.108588 4338 autoscaler.go:256] workload-t694m: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.113562 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: group kube-system/kubemark-actuator-testing-machineset-red size set to 2 I0805 12:17:47.118082 4338 autoscaler.go:256] workload-pvgkj: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.120451 4338 autoscaler.go:256] workload-2s89k: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.124084 4338 autoscaler.go:256] workload-zdvmv: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.129160 4338 autoscaler.go:256] workload-m5hbp: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.133275 4338 autoscaler.go:256] workload-jsbtj: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.135625 4338 autoscaler.go:256] workload-frfbg: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.277640 4338 autoscaler.go:256] workload-d2mf5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.477527 4338 autoscaler.go:256] workload-nx68x: pod triggered scale-up: 
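Editor's note: the recurring '[...remaining] Expecting 3 "ScaledUpGroup" events; observed N' lines are a countdown poll over the event stream. A sketch of that loop, assuming a recent client-go with context-aware signatures; the namespace, cadence, and output format mirror the log but are not the actuator-pkg's actual code:

package e2esketch

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForScaledUpGroupEvents polls every 3s until `want` events with reason
// "ScaledUpGroup" have been observed, or the 10-minute budget runs out.
func waitForScaledUpGroupEvents(client kubernetes.Interface, want int) error {
	timeout := 10 * time.Minute
	deadline := time.Now().Add(timeout)

	return wait.PollImmediate(3*time.Second, timeout, func() (bool, error) {
		events, err := client.CoreV1().Events("kube-system").List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		got := 0
		for _, ev := range events.Items {
			if ev.Reason == "ScaledUpGroup" {
				got++
			}
		}
		fmt.Printf("[%v remaining] Expecting %d \"ScaledUpGroup\" events; observed %d\n",
			time.Until(deadline).Round(time.Second), want, got)
		return got >= want, nil
	})
}

The same shape of loop explains the later "ScaleDownEmpty" countdown once the workload is deleted.
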
[{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.680663 4338 autoscaler.go:256] workload-8kg4j: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:47.878419 4338 autoscaler.go:256] workload-xplw7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:48.079011 4338 autoscaler.go:256] workload-smr8x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:48.280400 4338 autoscaler.go:256] workload-lf82w: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:48.477962 4338 autoscaler.go:256] workload-pwjsl: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:48.677643 4338 autoscaler.go:256] workload-d9hp9: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:48.877402 4338 autoscaler.go:256] workload-7grp5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.077649 4338 autoscaler.go:256] workload-2q9s8: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.278681 4338 autoscaler.go:256] workload-hdm7c: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.480029 4338 autoscaler.go:256] workload-x7gqw: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.681578 4338 autoscaler.go:256] workload-v898x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.878738 4338 autoscaler.go:256] workload-6sxqt: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:49.891225 4338 autoscaler.go:277] [9m36s remaining] Expecting 3 "ScaledUpGroup" events; observed 2 I0805 12:17:50.077598 4338 autoscaler.go:256] workload-mghtm: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:50.290190 4338 autoscaler.go:256] workload-nb9j5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:50.477872 4338 autoscaler.go:256] workload-6drjz: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:50.678880 4338 autoscaler.go:256] workload-jjnlk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:50.877616 4338 autoscaler.go:256] workload-nsf55: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:51.078327 4338 autoscaler.go:256] workload-2w7hq: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:51.278027 4338 autoscaler.go:256] workload-wd968: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:51.480289 4338 autoscaler.go:256] workload-hkzkx: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:51.686744 4338 autoscaler.go:256] workload-7nk5q: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:51.878149 4338 autoscaler.go:256] workload-rztgk: pod triggered 
scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.395689 4338 autoscaler.go:256] workload-t7mc7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.415034 4338 autoscaler.go:256] workload-w8g8k: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.478757 4338 autoscaler.go:256] workload-sgm7d: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.678483 4338 autoscaler.go:256] workload-5fwc8: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.878334 4338 autoscaler.go:256] workload-fhdqb: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:52.891554 4338 autoscaler.go:277] [9m33s remaining] Expecting 3 "ScaledUpGroup" events; observed 2 I0805 12:17:53.077884 4338 autoscaler.go:256] workload-6b45p: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:53.278174 4338 autoscaler.go:256] workload-przng: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:53.479687 4338 autoscaler.go:256] workload-9fjsf: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:53.677337 4338 autoscaler.go:256] workload-nfdcv: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-red 1->2 (max: 2)}] I0805 12:17:55.891745 4338 autoscaler.go:277] [9m30s remaining] Expecting 3 "ScaledUpGroup" events; observed 2 I0805 12:17:57.122423 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: setting group kube-system/kubemark-actuator-testing-machineset-blue size to 2 I0805 12:17:57.133219 4338 autoscaler.go:256] workload-nfdcv: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.142249 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-up: group kube-system/kubemark-actuator-testing-machineset-blue size set to 2 I0805 12:17:57.144683 4338 autoscaler.go:256] workload-d9hp9: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.148093 4338 autoscaler.go:256] workload-pvgkj: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.150366 4338 autoscaler.go:256] workload-jjnlk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.154520 4338 autoscaler.go:256] workload-nsf55: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.159371 4338 autoscaler.go:256] workload-m5hbp: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.163554 4338 autoscaler.go:256] workload-x7gqw: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.321599 4338 autoscaler.go:256] workload-xplw7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.521639 4338 autoscaler.go:256] workload-smr8x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.722149 4338 autoscaler.go:256] workload-d2mf5: pod triggered scale-up: 
[{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:57.921844 4338 autoscaler.go:256] workload-lf82w: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:58.121805 4338 autoscaler.go:256] workload-frfbg: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:58.321793 4338 autoscaler.go:256] workload-nx68x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:58.521764 4338 autoscaler.go:256] workload-7nk5q: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:58.721878 4338 autoscaler.go:256] workload-wd968: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:58.891959 4338 autoscaler.go:277] [9m27s remaining] Expecting 3 "ScaledUpGroup" events; observed 3 I0805 12:17:58.892027 4338 autoscaler.go:296] [1m0s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:17:58.921883 4338 autoscaler.go:256] workload-hkzkx: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:59.121597 4338 autoscaler.go:256] workload-mghtm: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:59.322234 4338 autoscaler.go:256] workload-nb9j5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:59.522420 4338 autoscaler.go:256] workload-2w7hq: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:59.731238 4338 autoscaler.go:256] workload-8kg4j: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:17:59.923732 4338 autoscaler.go:256] workload-t694m: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:00.122106 4338 autoscaler.go:256] workload-jsbtj: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:00.322648 4338 autoscaler.go:256] workload-przng: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:00.522003 4338 autoscaler.go:256] workload-5fwc8: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:00.722546 4338 autoscaler.go:256] workload-w8g8k: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:00.921998 4338 autoscaler.go:256] workload-9fjsf: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:01.121810 4338 autoscaler.go:256] workload-t7mc7: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:01.321684 4338 autoscaler.go:256] workload-pwjsl: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:01.522105 4338 autoscaler.go:256] workload-v898x: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:01.721974 4338 autoscaler.go:256] workload-fhdqb: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:01.892236 4338 autoscaler.go:296] [57s 
remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:01.921993 4338 autoscaler.go:256] workload-zdvmv: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:02.122049 4338 autoscaler.go:256] workload-7grp5: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:02.322561 4338 autoscaler.go:256] workload-sgm7d: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:02.522588 4338 autoscaler.go:256] workload-2s89k: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:02.722255 4338 autoscaler.go:256] workload-rztgk: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:02.924989 4338 autoscaler.go:256] workload-6b45p: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:03.121989 4338 autoscaler.go:256] workload-6drjz: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:03.321816 4338 autoscaler.go:256] workload-hdm7c: pod triggered scale-up: [{kube-system/kubemark-actuator-testing-machineset-blue 1->2 (max: 2)}] I0805 12:18:04.892457 4338 autoscaler.go:296] [54s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:07.892681 4338 autoscaler.go:296] [51s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:10.892899 4338 autoscaler.go:296] [48s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:13.893128 4338 autoscaler.go:296] [45s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:16.893911 4338 autoscaler.go:296] [42s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:19.894135 4338 autoscaler.go:296] [39s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:22.894365 4338 autoscaler.go:296] [36s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:25.896943 4338 autoscaler.go:296] [33s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:28.897201 4338 autoscaler.go:296] [30s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:31.897459 4338 autoscaler.go:296] [27s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:34.897691 4338 autoscaler.go:296] [24s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:37.897915 4338 autoscaler.go:296] [21s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:40.898135 4338 autoscaler.go:296] [18s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:43.898374 4338 autoscaler.go:296] [15s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:46.898590 4338 autoscaler.go:296] [12s remaining] 
At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:49.898819 4338 autoscaler.go:296] [9s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:52.899075 4338 autoscaler.go:296] [6s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 I0805 12:18:55.899304 4338 autoscaler.go:296] [3s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 3, max=3 STEP: Deleting workload I0805 12:18:58.902740 4338 autoscaler.go:310] [10m0s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:01.902887 4338 autoscaler.go:310] [9m57s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:04.903101 4338 autoscaler.go:310] [9m54s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:07.903304 4338 autoscaler.go:310] [9m51s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:10.903539 4338 autoscaler.go:310] [9m48s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:13.903768 4338 autoscaler.go:310] [9m45s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:16.903933 4338 autoscaler.go:310] [9m42s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:19.904166 4338 autoscaler.go:310] [9m39s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:22.904387 4338 autoscaler.go:310] [9m36s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:25.904638 4338 autoscaler.go:310] [9m33s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:28.904864 4338 autoscaler.go:310] [9m30s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:31.905018 4338 autoscaler.go:310] [9m27s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:34.905245 4338 autoscaler.go:310] [9m24s remaining] Waiting for cluster-autoscaler to generate 3 more "ScaleDownEmpty" events I0805 12:19:37.254881 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: removing empty node f5eb46f9-3a47-4b64-9106-3b957e2a5c73 I0805 12:19:37.284098 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: empty node f5eb46f9-3a47-4b64-9106-3b957e2a5c73 removed I0805 12:19:37.287209 4338 autoscaler.go:256] f5eb46f9-3a47-4b64-9106-3b957e2a5c73: node removed by cluster autoscaler I0805 12:19:37.905452 4338 autoscaler.go:310] [9m21s remaining] Waiting for cluster-autoscaler to generate 2 more "ScaleDownEmpty" events I0805 12:19:40.905658 4338 autoscaler.go:310] [9m18s remaining] Waiting for cluster-autoscaler to generate 2 more "ScaleDownEmpty" events I0805 12:19:43.905822 4338 autoscaler.go:310] [9m15s remaining] Waiting for cluster-autoscaler to generate 2 more "ScaleDownEmpty" events I0805 12:19:46.906067 4338 autoscaler.go:310] [9m12s remaining] Waiting for cluster-autoscaler to generate 2 more "ScaleDownEmpty" events I0805 12:19:47.293254 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: removing empty node 1dea0527-9abd-42e9-8586-28e8ed2f6599 I0805 12:19:47.296631 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: removing 
empty node 0b91513f-b668-4f08-bf0e-5d376358ade3 I0805 12:19:47.320165 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: empty node 1dea0527-9abd-42e9-8586-28e8ed2f6599 removed I0805 12:19:47.330106 4338 autoscaler.go:256] 1dea0527-9abd-42e9-8586-28e8ed2f6599: node removed by cluster autoscaler I0805 12:19:47.336695 4338 autoscaler.go:256] cluster-autoscaler-status: Scale-down: empty node 0b91513f-b668-4f08-bf0e-5d376358ade3 removed I0805 12:19:47.340568 4338 autoscaler.go:256] 0b91513f-b668-4f08-bf0e-5d376358ade3: node removed by cluster autoscaler I0805 12:19:49.906284 4338 autoscaler.go:310] [9m9s remaining] Waiting for cluster-autoscaler to generate 0 more "ScaleDownEmpty" events STEP: Waiting for scaled up nodes to be deleted I0805 12:19:49.910559 4338 autoscaler.go:320] [3m0s remaining] Waiting for cluster to reach original node count of 5; currently have 5 • [SLOW TEST:144.242 seconds] [Feature:Machines][Serial] Autoscaler should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:190 scale up and down /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:191 ------------------------------ [Feature:Operators] Machine API cluster operator status should be available /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:53 I0805 12:19:49.954137 4338 framework.go:397] >>> kubeConfig: /root/.kube/config • ------------------------------ [Feature:Machines] Managed cluster should have machines linked with nodes /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:130 I0805 12:19:49.970761 4338 framework.go:397] >>> kubeConfig: /root/.kube/config I0805 12:19:50.007179 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:19:50.007213 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-4gn5h" is linked to node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:19:50.007227 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:19:50.007248 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:19:50.007260 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-klb2k" is linked to node "76b98f5e-392b-4471-abd7-652a7503b290" I0805 12:19:50.007272 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" • ------------------------------ [Feature:Machines] Managed cluster should have ability to additively reconcile taints from machine to nodes /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:139 I0805 12:19:50.007341 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: getting machine "kubemark-actuator-testing-machineset-4gn5h" I0805 12:19:50.051078 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-4gn5h" is backing node "b1df8fe6-a88a-4518-918d-3509be42cf63" STEP: getting the backed node "b1df8fe6-a88a-4518-918d-3509be42cf63" STEP: updating node "b1df8fe6-a88a-4518-918d-3509be42cf63" with taint: {not-from-machine true NoSchedule <nil>}
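Editor's note: the utils.go:69 'Machine ... is linked to node ...' lines above verify that machine and node counts match and that each Machine's status.nodeRef names an existing node. A sketch of that check; the machine.openshift.io/v1beta1 GVR and the kube-system namespace are assumptions based on the log:

package e2esketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
)

var machineGVR = schema.GroupVersionResource{
	Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines",
}

// machinesLinkedToNodes fails if any Machine lacks a nodeRef or points at a
// node that does not exist, mirroring the utils.go:46/69 output above.
func machinesLinkedToNodes(ctx context.Context, dyn dynamic.Interface, kube kubernetes.Interface) error {
	machines, err := dyn.Resource(machineGVR).Namespace("kube-system").List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	nodes, err := kube.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	if len(machines.Items) != len(nodes.Items) {
		return fmt.Errorf("have %d nodes and %d machines", len(nodes.Items), len(machines.Items))
	}
	nodeNames := make(map[string]bool, len(nodes.Items))
	for _, n := range nodes.Items {
		nodeNames[n.Name] = true
	}
	for _, m := range machines.Items {
		ref, found, _ := unstructured.NestedString(m.Object, "status", "nodeRef", "name")
		if !found || !nodeNames[ref] {
			return fmt.Errorf("machine %q is not linked to an existing node", m.GetName())
		}
		fmt.Printf("Machine %q is linked to node %q\n", m.GetName(), ref)
	}
	return nil
}
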
STEP: updating machine "kubemark-actuator-testing-machineset-4gn5h" with taint: {from-machine-524b2927-b77b-11e9-8c83-0ac1370f6c5a true NoSchedule <nil>} I0805 12:19:50.111358 4338 infra.go:178] Getting node from machine again for verification of taints I0805 12:19:50.114951 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-4gn5h" is backing node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:19:50.114974 4338 infra.go:188] Expected : map[from-machine-524b2927-b77b-11e9-8c83-0ac1370f6c5a:{} not-from-machine:{}], observed map[kubemark:{} not-from-machine:{} from-machine-524b2927-b77b-11e9-8c83-0ac1370f6c5a:{}] , difference map[], STEP: Getting the latest version of the original machine STEP: Setting back the original machine taints STEP: Getting the latest version of the node I0805 12:19:50.127678 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-4gn5h" is backing node "b1df8fe6-a88a-4518-918d-3509be42cf63" STEP: Setting back the original node taints • ------------------------------ [Feature:Machines] Managed cluster should recover from deleted worker machines /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:214 I0805 12:19:50.134260 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: checking initial cluster state I0805 12:19:50.150023 4338 utils.go:86] Cluster size is 5 nodes I0805 12:19:50.150046 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:19:50.152825 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0805 12:19:50.152876 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:19:50.152895 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:19:50.152904 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:19:50.155541 4338 utils.go:230] Node "76b98f5e-392b-4471-abd7-652a7503b290". Ready: true. Unschedulable: false I0805 12:19:50.155563 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:19:50.155572 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:19:50.155580 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:19:50.155588 4338 utils.go:230] Node "minikube". Ready: true. 
Unschedulable: false I0805 12:19:50.158114 4338 utils.go:86] Cluster size is 5 nodes I0805 12:19:50.158132 4338 utils.go:255] waiting for all nodes to be ready I0805 12:19:50.160704 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:19:50.163218 4338 utils.go:287] Node "76b98f5e-392b-4471-abd7-652a7503b290" is schedulable I0805 12:19:50.163240 4338 utils.go:287] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is schedulable I0805 12:19:50.163249 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:19:50.163256 4338 utils.go:287] Node "b1df8fe6-a88a-4518-918d-3509be42cf63" is schedulable I0805 12:19:50.163264 4338 utils.go:287] Node "minikube" is schedulable I0805 12:19:50.163272 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:19:50.169255 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:19:50.169283 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-4gn5h" is linked to node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:19:50.169304 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:19:50.169315 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:19:50.169327 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-klb2k" is linked to node "76b98f5e-392b-4471-abd7-652a7503b290" I0805 12:19:50.169338 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" STEP: getting worker node STEP: deleting machine object "kubemark-actuator-testing-machineset-red-klb2k" STEP: waiting for node object "76b98f5e-392b-4471-abd7-652a7503b290" to go away I0805 12:19:50.186124 4338 infra.go:249] Node "76b98f5e-392b-4471-abd7-652a7503b290" still exists. Node conditions are: [{OutOfDisk False 2019-08-05 12:19:48 +0000 UTC 2019-08-05 12:17:49 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:19:48 +0000 UTC 2019-08-05 12:17:49 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:19:48 +0000 UTC 2019-08-05 12:17:49 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:19:48 +0000 UTC 2019-08-05 12:17:49 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:19:48 +0000 UTC 2019-08-05 12:17:49 +0000 UTC KubeletReady kubelet is posting ready status}] STEP: waiting for new node object to come up I0805 12:19:55.190896 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:19:55.193927 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0805 12:19:55.193956 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:19:55.193968 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:19:55.193978 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:19:55.197574 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:19:55.197594 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. 
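Editor's note: the 'recover from deleted worker machines' flow here deletes one Machine and waits for its node to go away; the owning MachineSet then creates a replacement (red-klb2k gives way to red-ddndz below). A sketch of the delete-and-wait step, with the GVR, namespace, and poll cadence as assumptions:

package e2esketch

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
)

var workerMachineGVR = schema.GroupVersionResource{
	Group: "machine.openshift.io", Version: "v1beta1", Resource: "machines",
}

// deleteMachineAndWait removes one worker Machine, then polls until the
// backing node object is gone.
func deleteMachineAndWait(ctx context.Context, dyn dynamic.Interface, kube kubernetes.Interface, machine, node string) error {
	if err := dyn.Resource(workerMachineGVR).Namespace("kube-system").Delete(ctx, machine, metav1.DeleteOptions{}); err != nil {
		return err
	}
	// Poll every 5s for up to 3 minutes; the cadence is an assumption.
	return wait.PollImmediate(5*time.Second, 3*time.Minute, func() (bool, error) {
		_, err := kube.CoreV1().Nodes().Get(ctx, node, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil // node object went away
		}
		return false, err // still exists (err == nil) or a real error
	})
}
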
Unschedulable: false I0805 12:19:55.197600 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:19:55.197605 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:19:55.197613 4338 utils.go:230] Node "minikube". Ready: true. Unschedulable: false I0805 12:19:55.200627 4338 utils.go:86] Cluster size is 5 nodes I0805 12:19:55.200649 4338 utils.go:255] waiting for all nodes to be ready I0805 12:19:55.203679 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:19:55.206419 4338 utils.go:287] Node "4936be43-004e-4f92-970e-e9a1af845a6c" is schedulable I0805 12:19:55.206440 4338 utils.go:287] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is schedulable I0805 12:19:55.206448 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:19:55.206456 4338 utils.go:287] Node "b1df8fe6-a88a-4518-918d-3509be42cf63" is schedulable I0805 12:19:55.206464 4338 utils.go:287] Node "minikube" is schedulable I0805 12:19:55.206471 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:19:55.212694 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:19:55.212718 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-4gn5h" is linked to node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:19:55.212726 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:19:55.212733 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:19:55.212739 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-ddndz" is linked to node "4936be43-004e-4f92-970e-e9a1af845a6c" I0805 12:19:55.212746 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" • [SLOW TEST:5.079 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:127 recover from deleted worker machines /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:214 ------------------------------ [Feature:Machines] Managed cluster should grow or decrease when scaling out or in /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:261 STEP: checking initial cluster state I0805 12:19:55.212857 4338 framework.go:397] >>> kubeConfig: /root/.kube/config I0805 12:19:55.228759 4338 utils.go:86] Cluster size is 5 nodes I0805 12:19:55.228782 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:19:55.232058 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0805 12:19:55.232079 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:19:55.232089 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:19:55.232098 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:19:55.234966 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. 
Unschedulable: false I0805 12:19:55.234987 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:19:55.234996 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:19:55.235004 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:19:55.235012 4338 utils.go:230] Node "minikube". Ready: true. Unschedulable: false I0805 12:19:55.237823 4338 utils.go:86] Cluster size is 5 nodes I0805 12:19:55.237871 4338 utils.go:255] waiting for all nodes to be ready I0805 12:19:55.240566 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:19:55.244688 4338 utils.go:287] Node "4936be43-004e-4f92-970e-e9a1af845a6c" is schedulable I0805 12:19:55.244711 4338 utils.go:287] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is schedulable I0805 12:19:55.244720 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:19:55.244728 4338 utils.go:287] Node "b1df8fe6-a88a-4518-918d-3509be42cf63" is schedulable I0805 12:19:55.244736 4338 utils.go:287] Node "minikube" is schedulable I0805 12:19:55.244743 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:19:55.250535 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:19:55.250559 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-4gn5h" is linked to node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:19:55.250572 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:19:55.250583 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:19:55.250596 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-ddndz" is linked to node "4936be43-004e-4f92-970e-e9a1af845a6c" I0805 12:19:55.250610 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" STEP: scaling out "kubemark-actuator-testing-machineset" machineSet to 3 replicas I0805 12:19:55.253343 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: waiting for cluster to grow 2 nodes. Size should be 7 I0805 12:19:55.284062 4338 utils.go:237] Cluster size expected to be 7 nodes I0805 12:19:55.288589 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 3. Ready: 1, available 1 I0805 12:19:55.288611 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:19:55.288621 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:19:55.288630 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:19:55.292892 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:19:55.292910 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:19:55.292918 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:19:55.292926 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:19:55.292933 4338 utils.go:230] Node "minikube". Ready: true. 
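Editor's note: 'scaling out "kubemark-actuator-testing-machineset" machineSet to 3 replicas' amounts to updating spec.replicas on the MachineSet. A sketch using a JSON merge patch via the dynamic client; the GVR and namespace are assumptions as above:

package e2esketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

var machineSetGVR = schema.GroupVersionResource{
	Group: "machine.openshift.io", Version: "v1beta1", Resource: "machinesets",
}

// scaleMachineSet sets spec.replicas; the MachineSet controller then adds or
// removes Machines (and eventually nodes) to converge on the new count.
func scaleMachineSet(ctx context.Context, dyn dynamic.Interface, name string, replicas int) error {
	patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
	_, err := dyn.Resource(machineSetGVR).Namespace("kube-system").Patch(
		ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}

The subsequent utils.go:237 polling ("Cluster size expected to be 7 nodes") is the same wait-for-count pattern sketched earlier, applied to nodes instead of events.
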
Unschedulable: false I0805 12:19:55.308467 4338 utils.go:86] Cluster size is 5 nodes I0805 12:20:00.308688 4338 utils.go:237] Cluster size expected to be 7 nodes I0805 12:20:00.312784 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 3. Ready: 3, available 3 I0805 12:20:00.312806 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:20:00.312812 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:20:00.312817 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:20:00.315665 4338 utils.go:230] Node "04e12b96-f007-4af0-981e-ebf8d1f9cb06". Ready: true. Unschedulable: false I0805 12:20:00.315685 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:20:00.315692 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:20:00.315697 4338 utils.go:230] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767". Ready: true. Unschedulable: false I0805 12:20:00.315701 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:20:00.315706 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:20:00.315711 4338 utils.go:230] Node "minikube". Ready: true. Unschedulable: false I0805 12:20:00.319135 4338 utils.go:86] Cluster size is 7 nodes I0805 12:20:00.319163 4338 utils.go:255] waiting for all nodes to be ready I0805 12:20:00.321770 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:20:00.324636 4338 utils.go:287] Node "04e12b96-f007-4af0-981e-ebf8d1f9cb06" is schedulable I0805 12:20:00.324655 4338 utils.go:287] Node "4936be43-004e-4f92-970e-e9a1af845a6c" is schedulable I0805 12:20:00.324660 4338 utils.go:287] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is schedulable I0805 12:20:00.324665 4338 utils.go:287] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" is schedulable I0805 12:20:00.324673 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:20:00.324681 4338 utils.go:287] Node "b1df8fe6-a88a-4518-918d-3509be42cf63" is schedulable I0805 12:20:00.324689 4338 utils.go:287] Node "minikube" is schedulable I0805 12:20:00.324704 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:20:00.330952 4338 utils.go:46] Expecting the same number of machines and nodes, have 7 nodes and 7 machines I0805 12:20:00.330970 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-46fpq" is linked to node "04e12b96-f007-4af0-981e-ebf8d1f9cb06" I0805 12:20:00.330978 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-4gn5h" is linked to node "b1df8fe6-a88a-4518-918d-3509be42cf63" I0805 12:20:00.330985 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:20:00.330991 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:20:00.330997 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-ddndz" is linked to node "4936be43-004e-4f92-970e-e9a1af845a6c" I0805 12:20:00.331004 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-w2dl6" is linked to node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" I0805 12:20:00.331010 4338 utils.go:69] Machine 
"minikube-static-machine" is linked to node "minikube" STEP: scaling in "kubemark-actuator-testing-machineset" machineSet to 1 replicas I0805 12:20:00.331024 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: waiting for cluster to decrease 2 nodes. Final size should be 5 nodes I0805 12:20:00.348556 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:20:00.353015 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 3, available 3 I0805 12:20:00.353036 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:20:00.353046 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:20:00.353055 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:20:00.358974 4338 utils.go:230] Node "04e12b96-f007-4af0-981e-ebf8d1f9cb06". Ready: true. Unschedulable: false I0805 12:20:00.358996 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:20:00.359004 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:20:00.359012 4338 utils.go:230] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767". Ready: true. Unschedulable: false I0805 12:20:00.359020 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:20:00.359028 4338 utils.go:230] Node "b1df8fe6-a88a-4518-918d-3509be42cf63". Ready: true. Unschedulable: false I0805 12:20:00.359039 4338 utils.go:230] Node "minikube". Ready: true. Unschedulable: false I0805 12:20:00.362889 4338 utils.go:86] Cluster size is 7 nodes I0805 12:20:05.363102 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:20:05.367584 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0805 12:20:05.367614 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:20:05.367624 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:20:05.367633 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:20:05.370863 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:20:05.370885 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:20:05.370891 4338 utils.go:230] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767". Ready: true. Unschedulable: false I0805 12:20:05.370897 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:20:05.370902 4338 utils.go:230] Node "minikube". Ready: true. 
Unschedulable: false I0805 12:20:05.373395 4338 utils.go:86] Cluster size is 5 nodes I0805 12:20:05.373421 4338 utils.go:255] waiting for all nodes to be ready I0805 12:20:05.375964 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:20:05.378633 4338 utils.go:287] Node "4936be43-004e-4f92-970e-e9a1af845a6c" is schedulable I0805 12:20:05.378653 4338 utils.go:287] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is schedulable I0805 12:20:05.378661 4338 utils.go:287] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" is schedulable I0805 12:20:05.378669 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:20:05.378677 4338 utils.go:287] Node "minikube" is schedulable I0805 12:20:05.378685 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:20:05.387625 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:20:05.387661 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is linked to node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:20:05.387675 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:20:05.387695 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-ddndz" is linked to node "4936be43-004e-4f92-970e-e9a1af845a6c" I0805 12:20:05.387706 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-w2dl6" is linked to node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" I0805 12:20:05.387725 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" • [SLOW TEST:10.175 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:127 grow or decrease when scaling out or in /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:261 ------------------------------ [Feature:Machines] Managed cluster should grow and decrease when scaling different machineSets simultaneously /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:300 I0805 12:20:05.387907 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: checking initial cluster size I0805 12:20:05.400404 4338 utils.go:86] Cluster size is 5 nodes STEP: getting worker machineSets STEP: scaling "kubemark-actuator-testing-machineset" from 1 to 3 replicas I0805 12:20:05.403089 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: scaling "kubemark-actuator-testing-machineset-blue" from 1 to 3 replicas I0805 12:20:05.420437 4338 framework.go:397] >>> kubeConfig: /root/.kube/config E0805 12:20:05.461986 4338 utils.go:156] Machine "kubemark-actuator-testing-machineset-b8hq9" has no NodeRef I0805 12:20:10.484824 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-b8hq9" is backing node "d22a1301-2ddf-4630-82d8-e242d9c7b6df" I0805 12:20:10.509218 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-fc5hh" is backing node "7bf2bf8a-30ed-45b1-8210-ae8529937d15" I0805 12:20:10.511149 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-w2dl6" is backing node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" I0805 12:20:10.511172 4338 utils.go:148] MachineSet "kubemark-actuator-testing-machineset" have 3 nodes I0805 12:20:10.511184 4338 utils.go:176] Node 
"d22a1301-2ddf-4630-82d8-e242d9c7b6df" is ready. Conditions are: [{OutOfDisk False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:20:08 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:20:08 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:20:08 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:20:08 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:20:08 +0000 UTC KubeletReady kubelet is posting ready status}] I0805 12:20:10.511229 4338 utils.go:176] Node "7bf2bf8a-30ed-45b1-8210-ae8529937d15" is ready. Conditions are: [{OutOfDisk False 2019-08-05 12:20:09 +0000 UTC 2019-08-05 12:20:09 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:09 +0000 UTC 2019-08-05 12:20:09 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:09 +0000 UTC 2019-08-05 12:20:09 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:09 +0000 UTC 2019-08-05 12:20:09 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:09 +0000 UTC 2019-08-05 12:20:09 +0000 UTC KubeletReady kubelet is posting ready status}] I0805 12:20:10.511263 4338 utils.go:176] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" is ready. Conditions are: [{OutOfDisk False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:19:58 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:19:58 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:19:58 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:19:58 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:10 +0000 UTC 2019-08-05 12:19:58 +0000 UTC KubeletReady kubelet is posting ready status}] I0805 12:20:10.522151 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is backing node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" E0805 12:20:10.522180 4338 utils.go:156] Machine "kubemark-actuator-testing-machineset-blue-m2c2t" has no NodeRef I0805 12:20:15.529521 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-blue-5hvq4" is backing node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" I0805 12:20:15.532492 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-blue-m2c2t" is backing node "62b26cae-8dea-415e-b47d-4d9ca9e6ed05" I0805 12:20:15.534046 4338 utils.go:164] Machine "kubemark-actuator-testing-machineset-blue-xvlzk" is backing node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777" I0805 12:20:15.534072 4338 utils.go:148] MachineSet "kubemark-actuator-testing-machineset-blue" have 3 nodes I0805 12:20:15.534084 4338 utils.go:176] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f" is ready. 
Conditions are: [{OutOfDisk False 2019-08-05 12:20:15 +0000 UTC 2019-08-05 12:16:44 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:15 +0000 UTC 2019-08-05 12:16:44 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:15 +0000 UTC 2019-08-05 12:16:44 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:15 +0000 UTC 2019-08-05 12:16:44 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:15 +0000 UTC 2019-08-05 12:16:44 +0000 UTC KubeletReady kubelet is posting ready status}] I0805 12:20:15.534136 4338 utils.go:176] Node "62b26cae-8dea-415e-b47d-4d9ca9e6ed05" is ready. Conditions are: [{OutOfDisk False 2019-08-05 12:20:13 +0000 UTC 2019-08-05 12:20:11 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:13 +0000 UTC 2019-08-05 12:20:11 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:13 +0000 UTC 2019-08-05 12:20:11 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:13 +0000 UTC 2019-08-05 12:20:11 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:13 +0000 UTC 2019-08-05 12:20:11 +0000 UTC KubeletReady kubelet is posting ready status}] I0805 12:20:15.534164 4338 utils.go:176] Node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777" is ready. Conditions are: [{OutOfDisk False 2019-08-05 12:20:14 +0000 UTC 2019-08-05 12:20:10 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-08-05 12:20:14 +0000 UTC 2019-08-05 12:20:10 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-08-05 12:20:14 +0000 UTC 2019-08-05 12:20:10 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-08-05 12:20:14 +0000 UTC 2019-08-05 12:20:10 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-08-05 12:20:14 +0000 UTC 2019-08-05 12:20:10 +0000 UTC KubeletReady kubelet is posting ready status}] STEP: scaling "kubemark-actuator-testing-machineset" from 3 to 1 replicas I0805 12:20:15.534208 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: scaling "kubemark-actuator-testing-machineset-blue" from 3 to 1 replicas I0805 12:20:15.549531 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: waiting for cluster to get back to original size. Final size should be 5 nodes I0805 12:20:15.579092 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:20:15.582355 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 3, available 3 I0805 12:20:15.582384 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 3, available 3 I0805 12:20:15.582394 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:20:15.582403 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:20:15.587828 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:20:15.587862 4338 utils.go:230] Node "62b26cae-8dea-415e-b47d-4d9ca9e6ed05". Ready: true. 
Unschedulable: false I0805 12:20:15.587872 4338 utils.go:230] Node "7b089bd0-5ff9-4be3-82f2-de4a71d4484f". Ready: true. Unschedulable: false I0805 12:20:15.587881 4338 utils.go:230] Node "7bf2bf8a-30ed-45b1-8210-ae8529937d15". Ready: true. Unschedulable: true I0805 12:20:15.587889 4338 utils.go:230] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767". Ready: true. Unschedulable: false I0805 12:20:15.587898 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:20:15.587907 4338 utils.go:230] Node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777". Ready: true. Unschedulable: false I0805 12:20:15.587920 4338 utils.go:230] Node "d22a1301-2ddf-4630-82d8-e242d9c7b6df". Ready: true. Unschedulable: false I0805 12:20:15.587948 4338 utils.go:230] Node "minikube". Ready: true. Unschedulable: false I0805 12:20:15.599034 4338 utils.go:86] Cluster size is 9 nodes I0805 12:20:20.599629 4338 utils.go:237] Cluster size expected to be 5 nodes I0805 12:20:20.603995 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1 I0805 12:20:20.604020 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1 I0805 12:20:20.604031 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1 I0805 12:20:20.604039 4338 utils.go:98] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1 I0805 12:20:20.607109 4338 utils.go:230] Node "4936be43-004e-4f92-970e-e9a1af845a6c". Ready: true. Unschedulable: false I0805 12:20:20.607133 4338 utils.go:230] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767". Ready: true. Unschedulable: false I0805 12:20:20.607142 4338 utils.go:230] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209". Ready: true. Unschedulable: false I0805 12:20:20.607150 4338 utils.go:230] Node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777". Ready: true. Unschedulable: false I0805 12:20:20.607158 4338 utils.go:230] Node "minikube". Ready: true. 
Unschedulable: false I0805 12:20:20.609992 4338 utils.go:86] Cluster size is 5 nodes I0805 12:20:20.610012 4338 utils.go:255] waiting for all nodes to be ready I0805 12:20:20.616199 4338 utils.go:260] waiting for all nodes to be schedulable I0805 12:20:20.619874 4338 utils.go:287] Node "4936be43-004e-4f92-970e-e9a1af845a6c" is schedulable I0805 12:20:20.619894 4338 utils.go:287] Node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" is schedulable I0805 12:20:20.619929 4338 utils.go:287] Node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" is schedulable I0805 12:20:20.619940 4338 utils.go:287] Node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777" is schedulable I0805 12:20:20.619948 4338 utils.go:287] Node "minikube" is schedulable I0805 12:20:20.619956 4338 utils.go:265] waiting for each node to be backed by a machine I0805 12:20:20.626212 4338 utils.go:46] Expecting the same number of machines and nodes, have 5 nodes and 5 machines I0805 12:20:20.626234 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-blue-xvlzk" is linked to node "a26f3f1c-6dcb-417e-b3bd-21e4c525b777" I0805 12:20:20.626243 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-green-7sjsb" is linked to node "8fc64e5f-dabb-415c-b7d7-86f5c944c209" I0805 12:20:20.626249 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-red-ddndz" is linked to node "4936be43-004e-4f92-970e-e9a1af845a6c" I0805 12:20:20.626256 4338 utils.go:69] Machine "kubemark-actuator-testing-machineset-w2dl6" is linked to node "7bf38d09-ca3d-4692-9a8a-9f1eca4be767" I0805 12:20:20.626262 4338 utils.go:69] Machine "minikube-static-machine" is linked to node "minikube" • [SLOW TEST:15.238 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:127 grow and decrease when scaling different machineSets simultaneously /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:300 ------------------------------ [Feature:Machines] Managed cluster should drain node before removing machine resource /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:355 I0805 12:20:20.626363 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: Taking the first worker machineset (assuming only worker machines are backed by machinesets) STEP: Creating two new machines, one for node about to be drained, other for moving workload from drained node STEP: Waiting until both new nodes are ready E0805 12:20:20.738150 4338 utils.go:338] Expecting 2 nodes with map[string]string{"node-draining-test":"fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a", "node-role.kubernetes.io/worker":""} labels in Ready state, got 0 I0805 12:20:25.741645 4338 utils.go:342] Expected number (2) of nodes with map[node-draining-test:fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a node-role.kubernetes.io/worker:] label in Ready state found STEP: Creating RC with workload STEP: Creating PDB for RC STEP: Wait until all replicas are ready I0805 12:20:25.769948 4338 utils.go:390] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 0 I0805 12:20:30.772151 4338 utils.go:390] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 20 I0805 12:20:35.772111 4338 utils.go:393] Waiting for RC ready replicas, ReadyReplicas: 20, Replicas: 20 STEP: Delete machine to trigger node draining STEP: Observing and 
verifying node draining E0805 12:20:35.782831 4338 utils.go:425] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is expected to be marked as unschedulable, but it is not I0805 12:20:40.787305 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:20:40.799129 4338 utils.go:448] Have 9 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:20:40.800673 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:20:40.800693 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 9 I0805 12:20:45.788315 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:20:45.796862 4338 utils.go:448] Have 8 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:20:45.799152 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:20:45.799177 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 8 I0805 12:20:50.789069 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:20:50.799377 4338 utils.go:448] Have 7 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:20:50.802413 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:20:50.802434 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 7 I0805 12:20:55.786978 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:20:55.792635 4338 utils.go:448] Have 6 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:20:55.794128 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:20:55.794149 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 6 I0805 12:21:00.786980 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:21:00.793508 4338 utils.go:448] Have 4 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:21:00.795097 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:21:00.795119 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 4 I0805 12:21:05.786956 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:21:05.793225 4338 utils.go:448] Have 3 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:21:05.794761 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:21:05.794784 4338 utils.go:474] Expecting at most 2 pods to be scheduled to drained node "b6e1acde-a181-4b4d-912f-75df5c8b1565", got 3 I0805 12:21:10.786935 4338 utils.go:429] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" is marked unschedulable as expected I0805 12:21:10.792753 4338 utils.go:448] Have 2 pods scheduled to node "b6e1acde-a181-4b4d-912f-75df5c8b1565" I0805 12:21:10.794285 4338 utils.go:464] RC ReadyReplicas: 20, Replicas: 20 I0805 12:21:10.794306 4338 utils.go:478] Expected result: all pods from the RC up to last one or two got scheduled to a different node while respecting PDB STEP: Validating the machine is deleted E0805 12:21:10.795925 4338 infra.go:460] Machine "machine1" not yet deleted E0805 12:21:15.798759 4338 infra.go:460] Machine "machine1" not yet deleted I0805 12:21:20.799039 4338 infra.go:469] 
Machine "machine1" successfully deleted STEP: Validate underlying node is removed as well I0805 12:21:20.800539 4338 utils.go:503] Node "b6e1acde-a181-4b4d-912f-75df5c8b1565" successfully deleted I0805 12:21:20.800564 4338 infra.go:365] Deleting object "machine2" I0805 12:21:20.804840 4338 infra.go:365] Deleting object "rc" I0805 12:21:20.808138 4338 infra.go:365] Deleting object "pdb" E0805 12:21:20.835509 4338 utils.go:369] Expecting to find 0 nodes with map[string]string{"node-draining-test":"fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a", "node-role.kubernetes.io/worker":""} labels, got 1 E0805 12:21:25.838432 4338 utils.go:369] Expecting to find 0 nodes with map[string]string{"node-role.kubernetes.io/worker":"", "node-draining-test":"fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a"} labels, got 1 E0805 12:21:30.838037 4338 utils.go:369] Expecting to find 0 nodes with map[string]string{"node-role.kubernetes.io/worker":"", "node-draining-test":"fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a"} labels, got 1 E0805 12:21:35.840111 4338 utils.go:369] Expecting to find 0 nodes with map[string]string{"node-draining-test":"fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a", "node-role.kubernetes.io/worker":""} labels, got 1 I0805 12:21:40.838141 4338 utils.go:373] Found 0 nodes with map[node-role.kubernetes.io/worker: node-draining-test:fc2e7b70-b77a-11e9-8c83-0ac1370f6c5a] label, as expected • [SLOW TEST:80.212 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:127 drain node before removing machine resource /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:355 ------------------------------ [Feature:Machines] Managed cluster should reject invalid machinesets /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:480 I0805 12:21:40.838256 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: Creating invalid machineset STEP: Waiting for ReconcileError MachineSet event I0805 12:21:40.894333 4338 infra.go:499] Fetching ReconcileError MachineSet invalid-machineset event I0805 12:21:45.945581 4338 infra.go:499] Fetching ReconcileError MachineSet invalid-machineset event I0805 12:21:45.945624 4338 infra.go:505] Found ReconcileError event for "invalid-machineset" machine set with the following message: "invalid-machineset" machineset validation failed: spec.template.metadata.labels: Invalid value: map[string]string{"big-kitty":"i-am-bit-kitty"}: `selector` does not match template `labels` STEP: Verify no machine from "invalid-machineset" machineset was created I0805 12:21:45.949583 4338 infra.go:521] Have 0 machines generated from "invalid-machineset" machineset STEP: Deleting invalid machineset • [SLOW TEST:5.116 seconds] [Feature:Machines] Managed cluster should /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:127 reject invalid machinesets /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:480 ------------------------------ [Feature:Operators] Machine API operator deployment should be available 
/data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:18 I0805 12:21:45.954223 4338 framework.go:397] >>> kubeConfig: /root/.kube/config I0805 12:21:45.968248 4338 utils.go:58] Deployment "machine-api-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) • ------------------------------ [Feature:Operators] Machine API operator deployment should reconcile controllers deployment /data/src/github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:25 I0805 12:21:45.968336 4338 framework.go:397] >>> kubeConfig: /root/.kube/config STEP: checking deployment "machine-api-controllers" is available I0805 12:21:45.986147 4338 utils.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) STEP: deleting deployment "machine-api-controllers" STEP: checking deployment "machine-api-controllers" is available again E0805 12:21:45.996663 4338 utils.go:27] Error querying api for Deployment object "machine-api-controllers": deployments.apps "machine-api-controllers" not found, retrying... E0805 12:21:46.999090 4338 utils.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1) I0805 12:21:48.001925 4338 utils.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0) • Ran 13 of 15 Specs in 262.369 seconds SUCCESS! -- 13 Passed | 0 Failed | 0 Pending | 2 Skipped --- PASS: TestE2E (262.37s) PASS ok github.com/openshift/cluster-api-provider-kubemark/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e 262.420s + set +o xtrace ########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 05m 12s] ########## [PostBuildScript] - Executing post build scripts. 
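The e2e stage above scales "kubemark-actuator-testing-machineset" and "kubemark-actuator-testing-machineset-blue" from 3 replicas down to 1, polls until the cluster settles back at 5 nodes, and verifies every surviving node is schedulable. A rough manual replay of that check is sketched below. It assumes the kubeconfig path the suite logs (/root/.kube/config), that the machinesets live in an openshift-machine-api namespace, and that the MachineSet resource exposes the scale subresource; none of those details are confirmed by this log.

  export KUBECONFIG=/root/.kube/config
  # Scale both machinesets down from 3 to 1 replica, as the test does.
  kubectl -n openshift-machine-api scale machineset kubemark-actuator-testing-machineset --replicas=1
  kubectl -n openshift-machine-api scale machineset kubemark-actuator-testing-machineset-blue --replicas=1
  # Poll until only the expected 5 nodes remain (4 workers plus "minikube").
  until [ "$(kubectl get nodes --no-headers | wc -l)" -eq 5 ]; do sleep 5; done
  # Confirm no surviving node is cordoned, mirroring the utils.go:287 check.
  kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.unschedulable}{"\n"}{end}'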
[workspace] $ /bin/bash /tmp/jenkins8205832178384652222.sh ########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/gathered + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/gathered + mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/gathered + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/gathered /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/gathered 0 directories, 0 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins236036839613863995.sh ########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/generated + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/generated + mkdir /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/generated + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1' WARNING: You're not using the default seccomp profile + ssh -F 
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1' + true + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1' + true + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1' + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1' + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/generated /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/generated ├── avc_denials.log ├── containers.log ├── dmesg.log ├── docker.config ├── docker.info ├── filesystem.info ├── installed_packages.log └── pid1.journal 0 directories, 8 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins1920656577735109210.sh ########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + trap 'exit 0' EXIT ++ pwd + ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/journals + rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/journals + mkdir /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/journals + ssh -F 
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all + tree /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/journals /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/artifacts/journals ├── dnsmasq.service ├── docker.service └── systemd-journald.service 0 directories, 3 files + exit 0 [workspace] $ /bin/bash /tmp/jenkins2883404613566693687.sh ########## STARTING STAGE: ASSEMBLE GCS OUTPUT ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + trap 'exit 0' EXIT + mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered ++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54/api/json'\''))['\''result'\'']' + result=SUCCESS + cat ++ date +%s + cat /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/builds/54/log + cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/ + cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/ + cp -r 'artifacts/gathered/*' gcs/artifacts/ cp: cannot stat ‘artifacts/gathered/*’: No such file or directory ++ export status=FAILURE ++ status=FAILURE + exit 0 [workspace] $ /bin/bash /tmp/jenkins1440782330994347011.sh ########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ########## + [[ -s 
/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ mktemp + script=/tmp/tmp.70U5QlAwZU + cat + chmod +x /tmp/tmp.70U5QlAwZU + scp -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.70U5QlAwZU openshiftdevel:/tmp/tmp.70U5QlAwZU + ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.70U5QlAwZU"' + cd /home/origin + trap 'exit 0' EXIT + [[ -n {"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"1158347496229965824","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}} ]] ++ jq --compact-output '.buildid |= "54"' + JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"54","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}' + docker run -e 
'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-cluster-api-provider-kubemark-master-e2e","buildid":"54","prowjobid":"d2ff9911-b778-11e9-afc1-0a58ac10c12b","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","repo_link":"https://github.com/openshift/cluster-api-provider-kubemark","base_ref":"master","base_sha":"58acd13e6caa24f1f47942e2628a692fb5a8fdeb","base_link":"https://github.com/openshift/cluster-api-provider-kubemark/commit/58acd13e6caa24f1f47942e2628a692fb5a8fdeb","pulls":[{"number":27,"author":"frobware","sha":"e43c339c9c23beec199855e697eed29abdd194f0","link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27","commit_link":"https://github.com/openshift/cluster-api-provider-kubemark/pull/27/commits/e43c339c9c23beec199855e697eed29abdd194f0","author_link":"https://github.com/frobware"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin '/data/gcs/*' Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ... latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload a073c86ecf9e: Already exists cc3fc741b1a9: Already exists d1873f49e953: Pulling fs layer 85cea451eec0: Pulling fs layer 85cea451eec0: Verifying Checksum 85cea451eec0: Download complete d1873f49e953: Verifying Checksum d1873f49e953: Download complete d1873f49e953: Pull complete 85cea451eec0: Pull complete Digest: sha256:d71cafc405d8ab374335d4477fbbecf3fa8557824fc97af74a4395db99280261 Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest {"component":"gcsupload","file":"prow/gcsupload/run.go:107","func":"k8s.io/test-infra/prow/gcsupload.Options.assembleTargets","level":"warning","msg":"Encountered error in resolving items to upload for /data/gcs/*: stat /data/gcs/*: no such file or directory","time":"2019-08-05T12:22:09Z"} {"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:22:09Z"} {"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:22:09Z"} {"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-08-05T12:22:09Z"} {"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:22:09Z"} 
{"component":"gcsupload","dest":"pr-logs/pull/openshift_cluster-api-provider-kubemark/27/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:22:10Z"} {"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/54.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-08-05T12:22:10Z"} {"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-08-05T12:22:10Z"} + exit 0 + set +o xtrace ########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ########## [workspace] $ /bin/bash /tmp/jenkins7705541209687727224.sh ########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ########## + [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ]] + source /var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/activate ++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed ++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin ++ unset PYTHON_HOME ++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config ++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config + oct deprovision PLAYBOOK: main.yml ************************************************************* 4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml PLAY [ensure we have the parameters necessary to deprovision virtual hosts] **** TASK [ensure all required variables are set] *********************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9 skipping: [localhost] => (item=origin_ci_inventory_dir) => { "changed": false, "generated_timestamp": "2019-08-05 08:22:11.287986", "item": "origin_ci_inventory_dir", "skip_reason": "Conditional check failed", "skipped": true } skipping: [localhost] => (item=origin_ci_aws_region) => { "changed": false, "generated_timestamp": "2019-08-05 08:22:11.292493", "item": "origin_ci_aws_region", "skip_reason": "Conditional check failed", "skipped": true } PLAY [deprovision virtual hosts in EC2] **************************************** TASK [Gathering Facts] ********************************************************* ok: [localhost] TASK [deprovision a virtual EC2 host] ****************************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28 included: 
/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost TASK [update the SSH configuration to remove AWS EC2 specifics] **************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2 ok: [localhost] => { "changed": false, "generated_timestamp": "2019-08-05 08:22:12.307541", "msg": "" } TASK [rename EC2 instance for termination reaper] ****************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8 changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:22:13.090520", "msg": "Tags {'Name': 'oct-terminate'} created for resource i-0af26858e335b2afe." } TASK [tear down the EC2 instance] ********************************************** task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15 changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:22:14.243333", "instance_ids": [ "i-0af26858e335b2afe" ], "instances": [ { "ami_launch_index": "0", "architecture": "x86_64", "block_device_mapping": { "/dev/sda1": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-0d4a41b559474cec4" }, "/dev/sdb": { "delete_on_termination": true, "status": "attached", "volume_id": "vol-0d4f042e0d382af62" } }, "dns_name": "ec2-34-229-130-184.compute-1.amazonaws.com", "ebs_optimized": false, "groups": { "sg-7e73221a": "default" }, "hypervisor": "xen", "id": "i-0af26858e335b2afe", "image_id": "ami-0b77b87a37c3e662c", "instance_type": "m4.xlarge", "kernel": null, "key_name": "libra", "launch_time": "2019-08-05T12:02:40.000Z", "placement": "us-east-1c", "private_dns_name": "ip-172-18-31-47.ec2.internal", "private_ip": "172.18.31.47", "public_dns_name": "ec2-34-229-130-184.compute-1.amazonaws.com", "public_ip": "34.229.130.184", "ramdisk": null, "region": "us-east-1", "root_device_name": "/dev/sda1", "root_device_type": "ebs", "state": "running", "state_code": 16, "tags": { "Name": "oct-terminate", "openshift_etcd": "", "openshift_master": "", "openshift_node": "" }, "tenancy": "default", "virtualization_type": "hvm" } ], "tagged_instances": [] } TASK [remove the serialized host variables] ************************************ task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22 changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:22:14.491099", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.31.47.yml", "state": "absent" } PLAY [deprovision virtual hosts locally managed by Vagrant] ******************** TASK [Gathering Facts] ********************************************************* ok: [localhost] PLAY [clean up local configuration for deprovisioned instances] **************** TASK [remove inventory configuration directory] ******************************** task path: 
/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61 changed: [localhost] => { "changed": true, "generated_timestamp": "2019-08-05 08:22:15.005155", "path": "/var/lib/jenkins/jobs/pull-ci-openshift-cluster-api-provider-kubemark-master-e2e/workspace/.config/origin-ci-tool/inventory", "state": "absent" } PLAY RECAP ********************************************************************* localhost : ok=8 changed=4 unreachable=0 failed=0 + set +o xtrace ########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 05s] ########## Archiving artifacts Recording test results [WS-CLEANUP] Deleting project workspace... [WS-CLEANUP] Deferred wipeout is used... [WS-CLEANUP] done Finished: SUCCESS
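One genuine defect is recorded in the ASSEMBLE GCS OUTPUT stage above: cp -r 'artifacts/gathered/*' gcs/artifacts/ single-quotes the glob, so the shell hands cp the literal string artifacts/gathered/* and the copy fails with "cannot stat", after which the stage sets status=FAILURE yet still exits 0 through its trap 'exit 0' EXIT. Because the gathered directory was empty in this run (the earlier tree showed 0 directories, 0 files), even an unquoted glob would have failed the same way. A defensive sketch of that copy step, assuming the same workspace layout:

  # Expand the glob explicitly; nullglob makes an unmatched pattern expand
  # to nothing instead of being passed through as a literal string.
  shopt -s nullglob
  gathered=(artifacts/gathered/*)
  if [ "${#gathered[@]}" -gt 0 ]; then
    cp -r "${gathered[@]}" gcs/artifacts/
  else
    echo "no gathered artifacts to copy, skipping"
  fi
  shopt -u nullglob

With a change along these lines the stage would no longer record status=FAILURE, and the downstream gcsupload warning about /data/gcs/* would disappear, for builds where nothing was gathered.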