Started by user OpenShift CI Robot
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[workspace] $ /bin/bash /tmp/jenkins2609453306702544274.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ touch /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
+ cp /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin/activate /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
+ cat
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 02s] ##########
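For reference, the `activate` file copied and sourced above is a plain environment script; reconstructed from the `++` xtrace lines, it amounts to roughly the following (a sketch, the real file may differ in detail):

#!/bin/bash
# Reconstructed sketch of the generated `activate` file, based on the
# xtrace output above.
export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
export PATH="${VIRTUAL_ENV}/bin:/sbin:/usr/sbin:/bin:/usr/bin"
unset PYTHON_HOME
export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config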
[workspace] $ /bin/bash /tmp/jenkins690907187694611320.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ oct provision remote all-in-one --os rhel --stage base --provider aws --discrete-ssh-config --name pull-ci-openshift-machine-api-operator-master-e2e_716
PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml
PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***
TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.058798",
"item": "origin_ci_inventory_dir",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_keypair_name) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.062427",
"item": "origin_ci_aws_keypair_name",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_private_key_path) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.065522",
"item": "origin_ci_aws_private_key_path",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.068791",
"item": "origin_ci_aws_region",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_ami_tags) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.071861",
"item": "origin_ci_aws_ami_tags",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_instance_name) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.074924",
"item": "origin_ci_aws_instance_name",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_instance_type) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.077990",
"item": "origin_ci_aws_master_instance_type",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.082220",
"item": "origin_ci_aws_identifying_tag_key",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_hostname) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.085280",
"item": "origin_ci_aws_hostname",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_ssh_config_strategy) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.088318",
"item": "origin_ci_ssh_config_strategy",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=openshift_schedulable) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.092523",
"item": "openshift_schedulable",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=openshift_node_labels) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.096117",
"item": "openshift_node_labels",
"skip_reason": "Conditional check failed",
"skipped": true
}
TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:27
skipping: [localhost] => (item=origin_ci_aws_master_subnet) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.129332",
"item": "origin_ci_aws_master_subnet",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.134012",
"item": "origin_ci_aws_etcd_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_node_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.138165",
"item": "origin_ci_aws_node_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.143530",
"item": "origin_ci_aws_master_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.147746",
"item": "origin_ci_aws_master_external_elb_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.153108",
"item": "origin_ci_aws_master_internal_elb_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.157366",
"item": "origin_ci_aws_router_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group) => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.163313",
"item": "origin_ci_aws_router_elb_security_group",
"skip_reason": "Conditional check failed",
"skipped": true
}
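The two "ensure all required variables are set" tasks above fail the play when any listed variable is undefined; every item is skipped here because `oct configure` already populated them. A rough bash analogue of that guard (not the playbook's actual implementation, just the pattern):

#!/bin/bash
# Hypothetical analogue of the Ansible required-variable guard:
# fail fast if any expected setting is empty.
required_vars=(origin_ci_aws_keypair_name origin_ci_aws_private_key_path origin_ci_aws_region)
for var in "${required_vars[@]}"; do
  if [[ -z "${!var:-}" ]]; then
    echo "required variable ${var} is not set" >&2
    exit 1
  fi
done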
PLAY [provision an AWS EC2 instance] *******************************************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:23.956164",
"gid": 995,
"group": "jenkins",
"mode": "0755",
"owner": "jenkins",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 6,
"state": "directory",
"uid": 997
}
TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => {
"changed": true,
"checksum": "18aaee00994df38cc3a63b635893175235331a9c",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/nested_group_mappings",
"generated_timestamp": "2019-09-06 10:54:24.419671",
"gid": 995,
"group": "jenkins",
"md5sum": "b30c3226ea63efa3ff9c5e346c14a16e",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 93,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781664.2-110173740647490/source",
"state": "file",
"uid": 997
}
TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 10:54:24.583878",
"gid": 995,
"group": "jenkins",
"mode": "0755",
"owner": "jenkins",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 6,
"state": "directory",
"uid": 997
}
TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 10:54:24.752192",
"gid": 995,
"group": "jenkins",
"mode": "0755",
"owner": "jenkins",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 6,
"state": "directory",
"uid": 997
}
TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => {
"changed": true,
"checksum": "4c06ba508f055c20f13426e8587342e8765a7b66",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml",
"generated_timestamp": "2019-09-06 10:54:25.046171",
"gid": 995,
"group": "jenkins",
"md5sum": "8aec71c75f7d512b278ae7c6f2959b12",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 331,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781664.91-108422028835334/source",
"state": "file",
"uid": 997
}
TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => {
"changed": true,
"cmd": [
"curl",
"-s",
"http://instance-data.ec2.internal"
],
"delta": "0:00:00.015330",
"end": "2019-09-06 10:54:25.272763",
"failed": false,
"failed_when_result": false,
"generated_timestamp": "2019-09-06 10:54:25.289640",
"rc": 0,
"start": "2019-09-06 10:54:25.257433",
"stderr": [],
"stdout": [
"1.0",
"2007-01-19",
"2007-03-01",
"2007-08-29",
"2007-10-10",
"2007-12-15",
"2008-02-01",
"2008-09-01",
"2009-04-04",
"2011-01-01",
"2011-05-01",
"2012-01-12",
"2014-02-25",
"2014-11-05",
"2015-10-20",
"2016-04-19",
"2016-06-30",
"2016-09-02",
"2018-03-28",
"2018-08-17",
"2018-09-24",
"latest"
],
"warnings": [
"Consider using get_url or uri module rather than running curl"
]
}
[WARNING]: Consider using get_url or uri module rather than running curl
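The task above decides whether the controller itself runs inside EC2 by probing the instance-metadata endpoint; the version list in stdout is what that endpoint returns, and failed_when is disabled so an unreachable endpoint would simply mean "not in EC2". A minimal sketch of the same probe:

#!/bin/bash
# Probe the EC2 instance-metadata service; reachable only from inside EC2.
if curl -s --max-time 5 http://instance-data.ec2.internal >/dev/null; then
  echo "running inside EC2"
else
  echo "not inside EC2"
fi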
TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_destination_variable": "private_dns_name",
"origin_ci_aws_host_address_variable": "private_ip",
"origin_ci_aws_vpc_destination_variable": "private_ip_address"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:25.328095"
}
TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:25.363811"
}
TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 10:54:25.527700",
"gid": 995,
"group": "jenkins",
"mode": "0755",
"owner": "jenkins",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ec2_cache",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 6,
"state": "directory",
"uid": 997
}
TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => {
"changed": true,
"checksum": "625b8af723189db3b96ba0026d0f997a0025bc47",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.py",
"generated_timestamp": "2019-09-06 10:54:25.818870",
"gid": 995,
"group": "jenkins",
"md5sum": "cac06c14065dac74904232b89d4ba24c",
"mode": "0755",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 63725,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781665.68-235483521267226/source",
"state": "file",
"uid": 997
}
TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => {
"changed": true,
"checksum": "f94cf21fdbe6046b2b885ad1f93991e252011e31",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/ec2.ini",
"generated_timestamp": "2019-09-06 10:54:26.101301",
"gid": 995,
"group": "jenkins",
"md5sum": "b483bdefdeb766739eb3202824fed18d",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 421,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781665.85-58358705775797/source",
"state": "file",
"uid": 997
}
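The `ec2.py` script and `ec2.ini` placed above are Ansible's classic EC2 dynamic inventory: the script queries the AWS API (using the cache directory created earlier) and emits grouped hosts as JSON. A usage sketch, assuming the legacy script's EC2_INI_PATH convention for locating its config:

#!/bin/bash
# Query the legacy EC2 dynamic inventory directly (sketch).
INVENTORY=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory
EC2_INI_PATH="${INVENTORY}/ec2.ini" "${INVENTORY}/ec2.py" --list | head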
TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => {
"changed": true,
"checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/tag_to_group_mappings",
"generated_timestamp": "2019-09-06 10:54:26.387827",
"gid": 995,
"group": "jenkins",
"md5sum": "bc3a567a1b6f342e1005182efc1b66be",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 287,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781666.25-109057332304890/source",
"state": "file",
"uid": 997
}
TASK [aws-up : list available AMIs] ********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:40
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:28.881907",
"results": [
{
"ami_id": "ami-04f9b88b6b0571f20",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 75,
"snapshot_id": "snap-0655d2d962c590c8c",
"volume_type": "gp2"
},
"/dev/sdb": {
"delete_on_termination": true,
"encrypted": false,
"size": 50,
"snapshot_id": "snap-0d86ae865b17f4def",
"volume_type": "gp2"
}
},
"creationDate": "2018-06-26T12:22:31.000Z",
"description": "OpenShift Origin development AMI on rhel at the base stage.",
"hypervisor": "xen",
"is_public": false,
"location": "531415883065/ami_build_origin_int_rhel_base_758",
"name": "ami_build_origin_int_rhel_base_758",
"owner_id": "531415883065",
"platform": null,
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "available",
"tags": {
"Name": "ami_build_origin_int_rhel_base_758",
"image_stage": "base",
"operating_system": "rhel",
"ready": "yes"
},
"virtualization_type": "hvm"
},
{
"ami_id": "ami-0b77b87a37c3e662c",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 75,
"snapshot_id": "snap-02ec23d4818f2747e",
"volume_type": "gp2"
},
"/dev/sdb": {
"delete_on_termination": true,
"encrypted": false,
"size": 50,
"snapshot_id": "snap-0d8726e441d4ca329",
"volume_type": "gp2"
}
},
"creationDate": "2018-06-26T22:18:53.000Z",
"description": "OpenShift Origin development AMI on rhel at the base stage.",
"hypervisor": "xen",
"is_public": false,
"location": "531415883065/ami_build_origin_int_rhel_base_760",
"name": "ami_build_origin_int_rhel_base_760",
"owner_id": "531415883065",
"platform": null,
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "available",
"tags": {
"Name": "ami_build_origin_int_rhel_base_760",
"image_stage": "base",
"operating_system": "rhel",
"ready": "yes"
},
"virtualization_type": "hvm"
}
]
}
TASK [aws-up : choose appropriate AMIs for use] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:49
ok: [localhost] => (item={u'ami_id': u'ami-04f9b88b6b0571f20', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_758', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d86ae865b17f4def', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0655d2d962c590c8c', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_758', u'is_public': False, u'creationDate': u'2018-06-26T12:22:31.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_758'}) => {
"ansible_facts": {
"origin_ci_aws_ami_id_candidate": "ami-04f9b88b6b0571f20"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:28.925900",
"item": {
"ami_id": "ami-04f9b88b6b0571f20",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 75,
"snapshot_id": "snap-0655d2d962c590c8c",
"volume_type": "gp2"
},
"/dev/sdb": {
"delete_on_termination": true,
"encrypted": false,
"size": 50,
"snapshot_id": "snap-0d86ae865b17f4def",
"volume_type": "gp2"
}
},
"creationDate": "2018-06-26T12:22:31.000Z",
"description": "OpenShift Origin development AMI on rhel at the base stage.",
"hypervisor": "xen",
"is_public": false,
"location": "531415883065/ami_build_origin_int_rhel_base_758",
"name": "ami_build_origin_int_rhel_base_758",
"owner_id": "531415883065",
"platform": null,
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "available",
"tags": {
"Name": "ami_build_origin_int_rhel_base_758",
"image_stage": "base",
"operating_system": "rhel",
"ready": "yes"
},
"virtualization_type": "hvm"
}
}
ok: [localhost] => (item={u'ami_id': u'ami-0b77b87a37c3e662c', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the base stage.', u'tags': {u'ready': u'yes', u'image_stage': u'base', u'Name': u'ami_build_origin_int_rhel_base_760', u'operating_system': u'rhel'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-0d8726e441d4ca329', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 50}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-02ec23d4818f2747e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 75}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_base_760', u'is_public': False, u'creationDate': u'2018-06-26T22:18:53.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_base_760'}) => {
"ansible_facts": {
"origin_ci_aws_ami_id_candidate": "ami-0b77b87a37c3e662c"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:28.934105",
"item": {
"ami_id": "ami-0b77b87a37c3e662c",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 75,
"snapshot_id": "snap-02ec23d4818f2747e",
"volume_type": "gp2"
},
"/dev/sdb": {
"delete_on_termination": true,
"encrypted": false,
"size": 50,
"snapshot_id": "snap-0d8726e441d4ca329",
"volume_type": "gp2"
}
},
"creationDate": "2018-06-26T22:18:53.000Z",
"description": "OpenShift Origin development AMI on rhel at the base stage.",
"hypervisor": "xen",
"is_public": false,
"location": "531415883065/ami_build_origin_int_rhel_base_760",
"name": "ami_build_origin_int_rhel_base_760",
"owner_id": "531415883065",
"platform": null,
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "available",
"tags": {
"Name": "ami_build_origin_int_rhel_base_760",
"image_stage": "base",
"operating_system": "rhel",
"ready": "yes"
},
"virtualization_type": "hvm"
}
}
TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:55
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_ami_id": "ami-0b77b87a37c3e662c"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:28.969498"
}
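Both candidate AMIs above match the configured tags; the loop sets origin_ci_aws_ami_id_candidate per item, so the last (newest) candidate wins and ami-0b77b87a37c3e662c is selected. A hypothetical aws-cli equivalent of that search-and-pick, filtering on the same tags and taking the most recently created image:

#!/bin/bash
# Hypothetical aws-cli equivalent of the AMI selection above.
aws ec2 describe-images \
  --owners 531415883065 \
  --filters "Name=tag:operating_system,Values=rhel" \
            "Name=tag:image_stage,Values=base" \
            "Name=tag:ready,Values=yes" \
  --query 'sort_by(Images,&CreationDate)[-1].ImageId' \
  --output text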
TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:60
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:29.621021",
"subnets": [
{
"availability_zone": "us-east-1c",
"available_ip_address_count": 4053,
"cidr_block": "172.18.16.0/20",
"default_for_az": "false",
"id": "subnet-8bdb5ac2",
"map_public_ip_on_launch": "true",
"state": "available",
"tags": {
"Name": "devenv-subnet-2",
"origin_ci_aws_cluster_component": "master_subnet"
},
"vpc_id": "vpc-69705d0c"
},
{
"availability_zone": "us-east-1d",
"available_ip_address_count": 4036,
"cidr_block": "172.18.0.0/20",
"default_for_az": "false",
"id": "subnet-cf57c596",
"map_public_ip_on_launch": "true",
"state": "available",
"tags": {
"Name": "devenv-subnet-1",
"origin_ci_aws_cluster_component": "master_subnet"
},
"vpc_id": "vpc-69705d0c"
}
]
}
TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:67
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_master_subnet_ids": [
"subnet-8bdb5ac2",
"subnet-cf57c596"
]
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:29.660871"
}
TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:72
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:30.452406",
"security_groups": [
{
"description": "default VPC security group",
"group_id": "sg-7e73221a",
"group_name": "default",
"ip_permissions": [
{
"ip_protocol": "-1",
"ip_ranges": [],
"ipv6_ranges": [],
"prefix_list_ids": [],
"user_id_group_pairs": [
{
"group_id": "sg-7e73221a",
"user_id": "531415883065"
}
]
},
{
"from_port": 80,
"ip_protocol": "tcp",
"ip_ranges": [
{
"cidr_ip": "54.241.19.245/32"
},
{
"cidr_ip": "97.65.119.184/29"
},
{
"cidr_ip": "107.20.219.35/32"
},
{
"cidr_ip": "108.166.48.153/32"
},
{
"cidr_ip": "212.199.177.64/27"
},
{
"cidr_ip": "212.72.208.162/32"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 443,
"user_id_group_pairs": []
},
{
"from_port": 53,
"ip_protocol": "tcp",
"ip_ranges": [
{
"cidr_ip": "119.254.120.64/26"
},
{
"cidr_ip": "209.132.176.0/20"
},
{
"cidr_ip": "209.132.186.34/32"
},
{
"cidr_ip": "213.175.37.10/32"
},
{
"cidr_ip": "62.40.79.66/32"
},
{
"cidr_ip": "66.187.224.0/20"
},
{
"cidr_ip": "66.187.239.0/24"
},
{
"cidr_ip": "38.140.108.0/24"
},
{
"cidr_ip": "213.175.37.9/32"
},
{
"cidr_ip": "38.99.12.232/29"
},
{
"cidr_ip": "4.14.33.72/30"
},
{
"cidr_ip": "4.14.35.88/29"
},
{
"cidr_ip": "50.227.40.96/29"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 8444,
"user_id_group_pairs": []
},
{
"from_port": 22,
"ip_protocol": "tcp",
"ip_ranges": [
{
"cidr_ip": "0.0.0.0/0"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 22,
"user_id_group_pairs": []
},
{
"from_port": 53,
"ip_protocol": "udp",
"ip_ranges": [
{
"cidr_ip": "209.132.176.0/20"
},
{
"cidr_ip": "66.187.224.0/20"
},
{
"cidr_ip": "66.187.239.0/24"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 53,
"user_id_group_pairs": []
},
{
"from_port": 0,
"ip_protocol": "udp",
"ip_ranges": [],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 65535,
"user_id_group_pairs": [
{
"group_id": "sg-0d1a5377",
"user_id": "531415883065"
},
{
"group_id": "sg-5875023f",
"user_id": "531415883065"
},
{
"group_id": "sg-7e73221a",
"user_id": "531415883065"
},
{
"group_id": "sg-e1760186",
"user_id": "531415883065"
}
]
},
{
"from_port": 3389,
"ip_protocol": "tcp",
"ip_ranges": [
{
"cidr_ip": "0.0.0.0/0"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": 3389,
"user_id_group_pairs": []
},
{
"from_port": -1,
"ip_protocol": "icmp",
"ip_ranges": [
{
"cidr_ip": "0.0.0.0/0"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"to_port": -1,
"user_id_group_pairs": []
}
],
"ip_permissions_egress": [
{
"ip_protocol": "-1",
"ip_ranges": [
{
"cidr_ip": "0.0.0.0/0"
}
],
"ipv6_ranges": [],
"prefix_list_ids": [],
"user_id_group_pairs": []
}
],
"owner_id": "531415883065",
"tags": {
"Name": "devenv-vpc",
"openshift_infra": "true",
"origin_ci_aws_cluster_component": "master_security_group"
},
"vpc_id": "vpc-69705d0c"
}
]
}
TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:79
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_master_security_group_ids": [
"sg-7e73221a"
]
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:30.493234"
}
TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:84
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 10:54:53.241195",
"instance_ids": [
"i-06550787d42cc325e"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-03d9d644224906960"
},
"/dev/sdb": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0bbfd421d51201f8f"
}
},
"dns_name": "ec2-52-200-5-193.compute-1.amazonaws.com",
"ebs_optimized": false,
"groups": {
"sg-7e73221a": "default"
},
"hypervisor": "xen",
"id": "i-06550787d42cc325e",
"image_id": "ami-0b77b87a37c3e662c",
"instance_type": "m4.xlarge",
"kernel": null,
"key_name": "libra",
"launch_time": "2019-09-06T14:54:32.000Z",
"placement": "us-east-1c",
"private_dns_name": "ip-172-18-28-208.ec2.internal",
"private_ip": "172.18.28.208",
"public_dns_name": "ec2-52-200-5-193.compute-1.amazonaws.com",
"public_ip": "52.200.5.193",
"ramdisk": null,
"region": "us-east-1",
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Name": "pull-ci-openshift-machine-api-operator-master-e2e_716",
"openshift_etcd": "",
"openshift_master": "",
"openshift_node": ""
},
"tenancy": "default",
"virtualization_type": "hvm"
}
],
"tagged_instances": []
}
TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:110
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_host": "172.18.28.208"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:53.278534"
}
TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:114
skipping: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:53.310224",
"skip_reason": "Conditional check failed",
"skipped": true
}
TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:119
ok: [localhost] => {
"ansible_facts": {
"origin_ci_aws_ssh_user": "origin"
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:53.345125"
}
TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:124
changed: [localhost] => {
"changed": true,
"checksum": "98d42c4cfc35d9a77439cd2692aafff6a7d6568f",
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.28.208.yml",
"generated_timestamp": "2019-09-06 10:54:53.636528",
"gid": 995,
"group": "jenkins",
"md5sum": "87b1cba59abb1228d1809ea4e7434b85",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 769,
"src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1567781693.5-249965212557336/source",
"state": "file",
"uid": 997
}
TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:141
ok: [localhost] => {
"ansible_facts": {
"origin_ci_ssh_config_files": [
"/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config"
]
},
"changed": false,
"generated_timestamp": "2019-09-06 10:54:53.673621"
}
TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:146
skipping: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 10:54:53.703676",
"skip_reason": "Conditional check failed",
"skipped": true
}
TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:151
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
"changed": true,
"dest": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config",
"generated_timestamp": "2019-09-06 10:54:53.866543",
"gid": 995,
"group": "jenkins",
"item": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config",
"mode": "0644",
"owner": "jenkins",
"secontext": "system_u:object_r:var_lib_t:s0",
"size": 0,
"state": "file",
"uid": 997
}
TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:157
changed: [localhost] => (item=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
"changed": true,
"generated_timestamp": "2019-09-06 10:54:54.145978",
"item": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config",
"msg": "Block inserted"
}
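The "Block inserted" edit above writes a Host entry for the new instance into the discrete .ssh_config that later stages pass to ssh/scp via -F. A plausible shape for that block, assembled from values earlier in the log (alias, address, user, and key are the ones shown; the exact option names are an assumption):

#!/bin/bash
# Sketch of the inserted ssh_config block; option names are assumptions.
cat >> .ssh_config <<'EOF'
Host openshiftdevel
  HostName 172.18.28.208
  User origin
  IdentityFile /var/lib/jenkins/.ssh/devenv.pem
  StrictHostKeyChecking no
EOF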
TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:175
ok: [localhost] => {
"changed": false,
"elapsed": 63,
"generated_timestamp": "2019-09-06 10:55:57.518832",
"path": null,
"port": 22,
"search_regex": null,
"state": "started"
}
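The wait_for task above blocked for 63 seconds until port 22 on the new host accepted connections. A bash sketch of the same wait:

#!/bin/bash
# Poll until SSH is reachable, up to a timeout (sketch of wait_for).
host=172.18.28.208
for _ in $(seq 1 120); do
  if timeout 2 bash -c ">/dev/tcp/${host}/22" 2>/dev/null; then
    echo "ssh is available"
    break
  fi
  sleep 1
done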
PLAY RECAP *********************************************************************
localhost : ok=28 changed=13 unreachable=0 failed=0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 01m 35s] ##########
[workspace] $ /bin/bash /tmp/jenkins232126286039077755.sh
########## STARTING STAGE: FORWARD GCS CREDENTIALS TO REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ (( i = 0 ))
+ (( i < 10 ))
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ break
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD GCS CREDENTIALS TO REMOTE HOST [00h 00m 02s] ##########
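The `(( i = 0 ))` / `(( i < 10 ))` / `break` fragments above are xtrace output of a retry loop around scp; reconstructed, the pattern is roughly (the back-off interval is an assumption, the log does not show one):

#!/bin/bash
# Reconstructed retry pattern implied by the xtrace fragments above;
# assumes OCT_CONFIG_HOME as exported by the sourced activate file.
for (( i = 0; i < 10; i++ )); do
  if scp -F "${OCT_CONFIG_HOME}/origin-ci-tool/inventory/.ssh_config" \
       /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json \
       openshiftdevel:/data/credentials.json; then
    break
  fi
  sleep 5
done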
[workspace] $ /bin/bash /tmp/jenkins6929665636970042056.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"1169987167749935104","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''buildId='\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_ID=1169987167749935104'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_OWNER=openshift'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''REPO_NAME=machine-api-operator'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_REF=master'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_BASE_SHA=474e14e4965a8c5e6788417c851ccc7fad1acb3a'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_REFS=master:474e14e4965a8c5e6788417c851ccc7fad1acb3a,389:229c7ea627e98ef3b7c1927a25352d366fea7023'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_NUMBER=389'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''PULL_PULL_SHA=229c7ea627e98ef3b7c1927a25352d366fea7023'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"1169987167749935104","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=716'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''CLONEREFS_ARGS='\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 05s] ##########
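Each ssh call in the stage above appends one KEY=value line to /etc/environment on the remote host, after the first call loosens its permissions; the '\'' sequences are just bash escaping of single quotes inside a single-quoted remote command. Condensed, the pattern is:

#!/bin/bash
# Condensed sketch of the parameter-forwarding pattern above;
# assumes OCT_CONFIG_HOME as exported by the sourced activate file.
ssh_cfg="${OCT_CONFIG_HOME}/origin-ci-tool/inventory/.ssh_config"
ssh -F "$ssh_cfg" openshiftdevel sudo chmod o+rw /etc/environment
for kv in 'REPO_OWNER=openshift' 'REPO_NAME=machine-api-operator' 'PULL_NUMBER=389'; do
  ssh -F "$ssh_cfg" openshiftdevel "echo '${kv}' >> /etc/environment"
done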
[workspace] $ /bin/bash /tmp/jenkins5952074927674220828.sh
########## STARTING STAGE: SYNC REPOSITORIES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.SGyLBz8p28
+ cat
+ chmod +x /tmp/tmp.SGyLBz8p28
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.SGyLBz8p28 openshiftdevel:/tmp/tmp.SGyLBz8p28
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.SGyLBz8p28"'
+ cd /home/origin
++ jq --compact-output '.buildid |= "716"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"716","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}'
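The jq filter above rewrites the buildid field of the forwarded JOB_SPEC from the Prow snowflake ID to the Jenkins build number (716); `|=` is jq's update-assignment operator. In context:

#!/bin/bash
# Rewrite .buildid in place with jq's update-assignment (sketch).
JOB_SPEC="$(jq --compact-output '.buildid |= "716"' <<< "${JOB_SPEC}")"
echo "${JOB_SPEC}" | jq .buildid   # => "716"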
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/clonerefs:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/clonerefs ...
latest: Pulling from registry.svc.ci.openshift.org/ci/clonerefs
1160f4abea84: Pulling fs layer
be60dbe7622d: Pulling fs layer
d26b76701841: Pulling fs layer
1b90cab916ea: Pulling fs layer
3a00cbb24bdb: Pulling fs layer
1b90cab916ea: Waiting
3a00cbb24bdb: Waiting
be60dbe7622d: Verifying Checksum
be60dbe7622d: Download complete
1160f4abea84: Verifying Checksum
1160f4abea84: Download complete
3a00cbb24bdb: Verifying Checksum
3a00cbb24bdb: Download complete
d26b76701841: Verifying Checksum
d26b76701841: Download complete
1b90cab916ea: Verifying Checksum
1b90cab916ea: Download complete
1160f4abea84: Pull complete
be60dbe7622d: Pull complete
d26b76701841: Pull complete
1b90cab916ea: Pull complete
3a00cbb24bdb: Pull complete
Digest: sha256:d68e1c6c2de5c1167a79b24d5ba4f909349ca7a44fb634e214bdadc2c8b010cd
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/clonerefs:latest
+ break
+ for image in ''\''registry.svc.ci.openshift.org/ci/clonerefs:latest'\''' ''\''registry.svc.ci.openshift.org/ci/initupload:latest'\'''
+ (( i = 0 ))
+ (( i < 5 ))
+ docker pull registry.svc.ci.openshift.org/ci/initupload:latest
Trying to pull repository registry.svc.ci.openshift.org/ci/initupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/initupload
a073c86ecf9e: Pulling fs layer
cc3fc741b1a9: Pulling fs layer
8f72556ef119: Pulling fs layer
8e5b170ec95b: Pulling fs layer
8e5b170ec95b: Waiting
cc3fc741b1a9: Verifying Checksum
cc3fc741b1a9: Download complete
a073c86ecf9e: Verifying Checksum
a073c86ecf9e: Download complete
8e5b170ec95b: Verifying Checksum
8e5b170ec95b: Download complete
8f72556ef119: Verifying Checksum
8f72556ef119: Download complete
a073c86ecf9e: Pull complete
cc3fc741b1a9: Pull complete
8f72556ef119: Pull complete
8e5b170ec95b: Pull complete
Digest: sha256:e651a6455ada7c070c439eddcd753e2e2ac1fb934c4f2a526c37a4674c8eaee4
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/initupload:latest
+ break
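Both images are pulled inside the same bounded retry loop visible in the xtrace (`(( i = 0 ))`, `(( i < 5 ))`, `break`); reconstructed:

#!/bin/bash
# Reconstructed bounded-retry pull loop implied by the xtrace above.
for image in registry.svc.ci.openshift.org/ci/clonerefs:latest \
             registry.svc.ci.openshift.org/ci/initupload:latest; do
  for (( i = 0; i < 5; i++ )); do
    docker pull "${image}" && break
  done
done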
+ clonerefs_args='--repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master '
+ docker run -v /data:/data:z registry.svc.ci.openshift.org/ci/clonerefs:latest --src-root=/data --log=/data/clone.json --repo=openshift,machine-api-operator=master:474e14e4965a8c5e6788417c851ccc7fad1acb3a,389:229c7ea627e98ef3b7c1927a25352d366fea7023 --repo=openshift,cluster-api-provider-kubemark=master --repo=openshift,cluster-autoscaler-operator=master --repo=openshift,kubernetes-autoscaler=master
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023"}]},"time":"2019-09-06T14:56:59Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"kubernetes-autoscaler","base_ref":"master"},"time":"2019-09-06T14:56:59Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-api-provider-kubemark","base_ref":"master"},"time":"2019-09-06T14:56:59Z"}
{"component":"clonerefs","file":"prow/pod-utils/clone/clone.go:34","func":"k8s.io/test-infra/prow/pod-utils/clone.Run","level":"info","msg":"Cloning refs","refs":{"org":"openshift","repo":"cluster-autoscaler-operator","base_ref":"master"},"time":"2019-09-06T14:56:59Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-api-provider-kubemark","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:56:59Z"}
{"command":"mkdir -p /data/src/github.com/openshift/kubernetes-autoscaler","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:56:59Z"}
{"command":"mkdir -p /data/src/github.com/openshift/cluster-autoscaler-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:56:59Z"}
{"command":"mkdir -p /data/src/github.com/openshift/machine-api-operator","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:56:59Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/kubernetes-autoscaler/.git/\n","time":"2019-09-06T14:56:59Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-api-provider-kubemark/.git/\n","time":"2019-09-06T14:56:59Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/cluster-autoscaler-operator/.git/\n","time":"2019-09-06T14:57:00Z"}
{"command":"git init","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Initialized empty Git repository in /data/src/github.com/openshift/machine-api-operator/.git/\n","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.name ci-robot","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git config user.email ci-robot@k8s.io","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:00Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.1.0 -\u003e v0.1.0\n * [new tag] v0.2.0 -\u003e v0.2.0\n","time":"2019-09-06T14:57:02Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-06T14:57:02Z"}
{"command":"git checkout 474e14e4965a8c5e6788417c851ccc7fad1acb3a","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out '474e14e4965a8c5e6788417c851ccc7fad1acb3a'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 474e14e... Merge pull request #391 from mgugino-upstream-stage/related-resources\n","time":"2019-09-06T14:57:03Z"}
{"command":"git branch --force master 474e14e4965a8c5e6788417c851ccc7fad1acb3a","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:03Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T14:57:03Z"}
{"command":"git fetch https://github.com/openshift/machine-api-operator.git pull/389/head","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/machine-api-operator\n * branch refs/pull/389/head -\u003e FETCH_HEAD\n","time":"2019-09-06T14:57:03Z"}
{"command":"git merge --no-ff 229c7ea627e98ef3b7c1927a25352d366fea7023","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Merge made by the 'recursive' strategy.\n pkg/operator/baremetal_pod.go | 64 +++++++++++++++++++++++++++++++++++++++----\n pkg/operator/sync.go | 7 +++++\n 2 files changed, 65 insertions(+), 6 deletions(-)\n","time":"2019-09-06T14:57:03Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v0.0.0 -\u003e v0.0.0\n","time":"2019-09-06T14:57:03Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:03Z"}
{"command":"git fetch https://github.com/openshift/cluster-autoscaler-operator.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-autoscaler-operator\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-06T14:57:03Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 045aea45... Merge pull request #117 from enxebre/more-related-objects\n","time":"2019-09-06T14:57:04Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:04Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T14:57:04Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:04Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] v1.0 -\u003e v1.0\n","time":"2019-09-06T14:57:04Z"}
{"command":"git fetch https://github.com/openshift/cluster-api-provider-kubemark.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/cluster-api-provider-kubemark\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-06T14:57:04Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 45659b31... Merge pull request #27 from frobware/bump-openshift-cluster-api-deps\n","time":"2019-09-06T14:57:06Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:06Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T14:57:06Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:06Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git --tags --prune","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch HEAD -\u003e FETCH_HEAD\n * [new tag] addon-resizer-1.8.0 -\u003e addon-resizer-1.8.0\n * [new tag] addon-resizer-1.8.1 -\u003e addon-resizer-1.8.1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.40.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.53.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.53.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.54.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.54.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.61.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.61.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.64.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.64.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.65.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.65.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.67.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.67.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.10.0-0.68.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.68.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.0-1666 -\u003e atomic-openshift-cluster-autoscaler-3.10.0-1666\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.1-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.1-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.10-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.10-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.15-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.2-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.2-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.10.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.10.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.11.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.11.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.16.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.16.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.18.0 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.20.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-3.11.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.100-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.100-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.104-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.104-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.105-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.105-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.106-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.106-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.107-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.107-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.108-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.108-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.109-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.109-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.11-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.11-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.110-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.110-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.111-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.111-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.112-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.112-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.113-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.113-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.114-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.114-1\n * [new tag] 
atomic-openshift-cluster-autoscaler-3.11.115-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.115-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.116-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.116-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.117-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.117-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.119-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.119-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.12-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.12-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.121-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.121-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.122-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.122-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.123-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.123-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.124-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.124-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.125-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.125-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.126-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.126-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.127-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.127-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.128-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.128-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.129-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.129-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.13-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.13-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.130-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.130-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.131-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.131-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.132-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.132-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.133-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.133-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.134-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.134-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.135-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.135-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.136-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.136-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.137-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.137-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.138-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.138-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.139-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.139-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.14-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.14-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.140-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.140-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.141-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.141-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.142-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.142-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.143-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.143-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.15-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.15-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.16-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.16-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.17-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.17-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.18-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.18-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.19-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.19-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.20-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.20-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.21-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.21-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.22-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.22-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.23-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.23-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.24-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.24-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.25-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.25-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.26-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.26-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.27-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.27-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.28-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.28-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.29-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.29-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.3-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.3-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.30-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.30-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.31-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.31-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.32-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.32-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.33-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.33-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.34-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.34-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.35-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.35-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.36-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.36-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.37-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.37-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.38-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.38-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.39-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.39-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.4-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.4-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.40-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.40-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.41-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.41-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.42-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.42-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.43-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.43-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.44-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.44-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.45-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.45-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.46-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.46-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.47-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.47-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.49-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.49-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.5-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.5-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.50-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.50-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.51-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.51-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.53-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.53-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.54-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.54-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.55-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.55-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.56-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.56-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.57-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.57-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.58-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.58-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.59-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.59-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.6-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.6-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.60-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.60-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.61-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.61-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.62-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.62-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.63-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.63-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.64-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.64-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.65-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.65-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.66-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.66-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.67-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.67-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.69-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.69-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.7-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.7-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.71-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.71-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.72-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.72-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.73-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.73-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.74-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.74-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.75-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.75-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.76-1 -\u003e 
atomic-openshift-cluster-autoscaler-3.11.76-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.77-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.77-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.78-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.78-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.79-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.79-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.8-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.8-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.81-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.81-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.82-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.82-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.83-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.83-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.85-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.85-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.86-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.86-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.87-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.87-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.88-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.88-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.9-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.9-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.90-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.90-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.91-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.91-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.92-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.92-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.93-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.93-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.94-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.94-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.95-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.95-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.96-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.96-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.97-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.97-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.98-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.98-1\n * [new tag] atomic-openshift-cluster-autoscaler-3.11.99-1 -\u003e atomic-openshift-cluster-autoscaler-3.11.99-1\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.10.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.10.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.100.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.100.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.101.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.101.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.102.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.102.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.103.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.103.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.104.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.104.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.105.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.105.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.106.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.106.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.107.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.107.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.109.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.109.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.110.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.110.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.112.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.112.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.114.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.114.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.115.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.115.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.116.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.116.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.117.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.117.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.118.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.118.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.119.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.119.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.12.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.12.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.122.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.122.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.123.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.123.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.124.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.124.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.125.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.125.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.128.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.128.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.13.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.13.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.130.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.130.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.131.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.131.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.132.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.132.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.136.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.136.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.137.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.137.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.138.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.138.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.139.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.139.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.14.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.14.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.140.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.140.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.141.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.141.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.142.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.142.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.143.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.143.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.144.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.144.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.145.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.145.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.146.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.146.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.147.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.147.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.148.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.148.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.149.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.149.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.15.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.15.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.17.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.17.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.18.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.18.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.19.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.19.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.20.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.20.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.21.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.21.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.22.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.22.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.23.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.23.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.24.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.24.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.25.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.25.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.26.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.26.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.27.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.27.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.28.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.28.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.29.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.29.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.30.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.30.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.31.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.31.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.32.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.32.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.33.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.33.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.36.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.36.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.37.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.37.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.38.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.38.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.39.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.39.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.4.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.4.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.40.0 -\u003e 
atomic-openshift-cluster-autoscaler-4.0.0-0.40.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.41.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.41.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.42.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.42.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.43.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.43.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.44.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.44.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.45.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.45.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.46.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.46.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.47.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.47.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.48.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.48.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.49.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.49.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.5.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.5.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.50.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.50.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.51.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.51.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.52.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.52.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.55.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.55.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.56.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.56.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.57.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.57.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.58.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.58.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.59.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.59.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.6.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.6.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.60.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.60.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.62.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.62.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.63.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.63.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.66.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.66.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.69.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.69.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.7.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.7.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.70.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.70.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.72.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.72.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.74.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.74.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.75.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.75.0\n * [new tag] 
atomic-openshift-cluster-autoscaler-4.0.0-0.76.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.76.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.77.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.77.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.79.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.79.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.8.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.8.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.80.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.80.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.81.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.81.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.82.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.82.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.83.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.83.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.84.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.84.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.85.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.85.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.87.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.87.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.88.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.88.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.89.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.89.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.9.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.9.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.91.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.91.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.92.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.92.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.93.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.93.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.94.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.94.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.95.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.95.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.96.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.96.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.97.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.97.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.98.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.98.0\n * [new tag] atomic-openshift-cluster-autoscaler-4.0.0-0.99.0 -\u003e atomic-openshift-cluster-autoscaler-4.0.0-0.99.0\n * [new tag] cluster-autoscaler-0.5.2 -\u003e cluster-autoscaler-0.5.2\n * [new tag] cluster-autoscaler-0.5.3 -\u003e cluster-autoscaler-0.5.3\n * [new tag] cluster-autoscaler-0.5.4 -\u003e cluster-autoscaler-0.5.4\n * [new tag] cluster-autoscaler-0.6.0 -\u003e cluster-autoscaler-0.6.0\n * [new tag] cluster-autoscaler-0.6.1 -\u003e cluster-autoscaler-0.6.1\n * [new tag] cluster-autoscaler-0.6.2 -\u003e cluster-autoscaler-0.6.2\n * [new tag] cluster-autoscaler-0.6.3 -\u003e cluster-autoscaler-0.6.3\n * [new tag] cluster-autoscaler-0.6.4 -\u003e cluster-autoscaler-0.6.4\n * [new tag] cluster-autoscaler-1.0.0 -\u003e cluster-autoscaler-1.0.0\n * [new tag] cluster-autoscaler-1.0.1 -\u003e cluster-autoscaler-1.0.1\n * [new tag] cluster-autoscaler-1.0.2 -\u003e cluster-autoscaler-1.0.2\n * [new tag] 
cluster-autoscaler-1.0.3 -\u003e cluster-autoscaler-1.0.3\n * [new tag] cluster-autoscaler-1.0.4 -\u003e cluster-autoscaler-1.0.4\n * [new tag] cluster-autoscaler-1.0.5 -\u003e cluster-autoscaler-1.0.5\n * [new tag] cluster-autoscaler-1.1.0 -\u003e cluster-autoscaler-1.1.0\n * [new tag] cluster-autoscaler-1.1.1 -\u003e cluster-autoscaler-1.1.1\n * [new tag] cluster-autoscaler-1.1.2 -\u003e cluster-autoscaler-1.1.2\n * [new tag] cluster-autoscaler-1.2.0 -\u003e cluster-autoscaler-1.2.0\n * [new tag] cluster-autoscaler-1.2.1 -\u003e cluster-autoscaler-1.2.1\n * [new tag] cluster-autoscaler-1.2.2 -\u003e cluster-autoscaler-1.2.2\n * [new tag] v3.10.0 -\u003e v3.10.0\n * [new tag] v3.10.0-alpha.0 -\u003e v3.10.0-alpha.0\n * [new tag] v3.10.0-rc.0 -\u003e v3.10.0-rc.0\n * [new tag] v3.11 -\u003e v3.11\n * [new tag] v3.11.0 -\u003e v3.11.0\n * [new tag] v3.11.0-alpha.0 -\u003e v3.11.0-alpha.0\n * [new tag] vertical-pod-autoscaler-0.1 -\u003e vertical-pod-autoscaler-0.1\n","time":"2019-09-06T14:57:08Z"}
{"command":"git fetch https://github.com/openshift/kubernetes-autoscaler.git master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"From https://github.com/openshift/kubernetes-autoscaler\n * branch master -\u003e FETCH_HEAD\n","time":"2019-09-06T14:57:08Z"}
{"command":"git checkout FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Note: checking out 'FETCH_HEAD'.\n\nYou are in 'detached HEAD' state. You can look around, make experimental\nchanges and commit them, and you can discard any commits you make in this\nstate without impacting any branches by performing another checkout.\n\nIf you want to create a new branch to retain commits you create, you may\ndo so (now or later) by using -b with the checkout command again. Example:\n\n git checkout -b \u003cnew-branch-name\u003e\n\nHEAD is now at 18a08df11... Merge pull request #114 from ingvagabund/goimports-makefile\n","time":"2019-09-06T14:57:11Z"}
{"command":"git branch --force master FETCH_HEAD","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:11Z"}
{"command":"git checkout master","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"Switched to branch 'master'\n","time":"2019-09-06T14:57:11Z"}
{"command":"git submodule update --init --recursive","component":"clonerefs","error":null,"file":"prow/pod-utils/clone/clone.go:42","func":"k8s.io/test-infra/prow/pod-utils/clone.Run.func1","level":"info","msg":"Ran command","output":"","time":"2019-09-06T14:57:11Z"}
{"component":"clonerefs","file":"prow/cmd/clonerefs/main.go:43","func":"main.main","level":"info","msg":"Finished cloning refs","time":"2019-09-06T14:57:11Z"}
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"716","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/initupload:latest --clone-log=/data/clone.json --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/716.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/clone-records.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/started.json","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T14:57:14Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/716.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/clone-records.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/started.json","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/716/clone-log.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T14:57:15Z"}
{"component":"initupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-06T14:57:15Z"}
+ sudo chmod -R a+rwX /data
+ sudo chown -R origin:origin-git /data
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: SYNC REPOSITORIES [00h 01m 14s] ##########
[workspace] $ /bin/bash /tmp/jenkins3575262279710769501.sh
########## STARTING STAGE: FORWARD PARAMETERS TO THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod o+rw /etc/environment
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''JOB_NAME=pull-ci-openshift-machine-api-operator-master-e2e'\'' >> /etc/environment'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'echo '\''BUILD_NUMBER=716'\'' >> /etc/environment'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FORWARD PARAMETERS TO THE REMOTE HOST [00h 00m 01s] ##########
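[editor's note] This stage persists the job parameters on the remote host by appending them to /etc/environment, which pam_env reads for every subsequent login; later stages run via `bash -l`, so they inherit JOB_NAME and BUILD_NUMBER without re-exporting. A quick way to confirm the values landed (sketch, run from the workspace):

  # Sketch: verify the forwarded parameters on the remote host
  ssh -F .config/origin-ci-tool/inventory/.ssh_config openshiftdevel cat /etc/environment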
[workspace] $ /bin/bash /tmp/jenkins1464767535790543971.sh
########## STARTING STAGE: INSTALL MINIKUBE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.LPItgphIRQ
+ cat
+ chmod +x /tmp/tmp.LPItgphIRQ
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.LPItgphIRQ openshiftdevel:/tmp/tmp.LPItgphIRQ
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.LPItgphIRQ"'
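[editor's note] This is the harness's usual remote-stage dispatch: write the stage body into a mktemp script (the bare `+ cat` above is the heredoc being written; xtrace does not echo heredoc contents), scp it to the remote host, and run it over ssh under a 4-hour timeout. Everything below is the script's own `set -x` trace from openshiftdevel. The pattern in general (script body illustrative):

  # Sketch of the dispatch pattern; the real body is the stage's heredoc, traced below
  script=$(mktemp)
  cat >"$script" <<'EOF'
  cd /home/origin
  # ... stage commands ...
  EOF
  chmod +x "$script"
  scp -F .ssh_config "$script" openshiftdevel:"$script"
  ssh -F .ssh_config -t openshiftdevel "bash -l -c \"timeout 14400 $script\""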
+ cd /home/origin
+ curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
100 40.3M 100 40.3M 0 0 31.0M 0 0:00:01 0:00:01 --:--:-- 31.0M
+ chmod +x minikube
+ sudo mv minikube /usr/bin/
+ curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
100 51.7M 100 51.7M 0 0 59.9M 0 --:--:-- --:--:-- --:--:-- 59.9M
+ chmod +x kubectl
+ sudo mv kubectl /usr/bin/
+ sudo yum install -y ebtables
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Resolving Dependencies
--> Running transaction check
---> Package ebtables.x86_64 0:2.0.10-16.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
 Package        Arch        Version           Repository                    Size
================================================================================
Installing:
 ebtables       x86_64      2.0.10-16.el7     oso-rhui-rhel-server-releases 123 k

Transaction Summary
================================================================================
Install  1 Package

Total download size: 123 k
Installed size: 343 k
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : ebtables-2.0.10-16.el7.x86_64 1/1
Verifying : ebtables-2.0.10-16.el7.x86_64 1/1
Installed:
ebtables.x86_64 0:2.0.10-16.el7
Complete!
+ VERSION=v1.13.0
+ wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
--2019-09-06 14:58:34-- https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.13.0/crictl-v1.13.0-linux-amd64.tar.gz
Resolving github.com (github.com)... 192.30.253.113
Connecting to github.com (github.com)|192.30.253.113|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190906%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190906T145834Z&X-Amz-Expires=300&X-Amz-Signature=24d06e03131cdff72203a5fe92a5ecfab493cd498332b2020c675ae64e6ad3bd&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream [following]
--2019-09-06 14:58:34-- https://github-production-release-asset-2e65be.s3.amazonaws.com/80172100/61627180-fed9-11e8-9958-15e7eb90aa9e?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20190906%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20190906T145834Z&X-Amz-Expires=300&X-Amz-Signature=24d06e03131cdff72203a5fe92a5ecfab493cd498332b2020c675ae64e6ad3bd&X-Amz-SignedHeaders=host&actor_id=0&response-content-disposition=attachment%3B%20filename%3Dcrictl-v1.13.0-linux-amd64.tar.gz&response-content-type=application%2Foctet-stream
Resolving github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)... 52.217.37.132
Connecting to github-production-release-asset-2e65be.s3.amazonaws.com (github-production-release-asset-2e65be.s3.amazonaws.com)|52.217.37.132|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 10631149 (10M) [application/octet-stream]
Saving to: ‘crictl-v1.13.0-linux-amd64.tar.gz’
2019-09-06 14:58:35 (99.6 MB/s) - ‘crictl-v1.13.0-linux-amd64.tar.gz’ saved [10631149/10631149]
+ sudo tar zxvf crictl-v1.13.0-linux-amd64.tar.gz -C /usr/bin
crictl
+ rm -f crictl-v1.13.0-linux-amd64.tar.gz
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL MINIKUBE [00h 01m 16s] ##########
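The INSTALL MINIKUBE stage generates a script via mktemp/cat (its body is not echoed), copies it over with scp, and runs it under a 4-hour timeout. Reconstructed from the traced commands, the remote script is approximately:

    #!/bin/bash
    # sketch reconstructed from the xtrace output above; 'set -euo pipefail' is an assumption
    set -euo pipefail
    cd /home/origin
    curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.30.0/minikube-linux-amd64
    chmod +x minikube && sudo mv minikube /usr/bin/
    curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.10.0/bin/linux/amd64/kubectl
    chmod +x kubectl && sudo mv kubectl /usr/bin/
    sudo yum install -y ebtables
    VERSION=v1.13.0
    wget https://github.com/kubernetes-sigs/cri-tools/releases/download/${VERSION}/crictl-${VERSION}-linux-amd64.tar.gz
    sudo tar zxvf crictl-${VERSION}-linux-amd64.tar.gz -C /usr/bin
    rm -f crictl-${VERSION}-linux-amd64.tar.gz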
[workspace] $ /bin/bash /tmp/jenkins181950169828746718.sh
########## STARTING STAGE: DEPLOY KUBERNETES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.X3XuRj0mqp
+ cat
+ chmod +x /tmp/tmp.X3XuRj0mqp
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.X3XuRj0mqp openshiftdevel:/tmp/tmp.X3XuRj0mqp
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.X3XuRj0mqp"'
+ cd /home/origin
+ sudo setenforce 0
+ sudo minikube start --vm-driver=none --extra-config=kubelet.cgroup-driver=systemd --kubernetes-version v1.12.0 --v 5
Starting local Kubernetes v1.12.0 cluster...
Starting VM...
There is a newer version of minikube available (v1.3.1). Download it here:
https://github.com/kubernetes/minikube/releases/tag/v1.3.1
To disable this notification, run the following:
minikube config set WantUpdateNotification false
Creating CA: /root/.minikube/certs/ca.pem
Creating client certificate: /root/.minikube/certs/cert.pem
Getting VM IP address...
Moving files into cluster...
Downloading kubeadm v1.12.0
Downloading kubelet v1.12.0
Finished Downloading kubeadm v1.12.0
Finished Downloading kubelet v1.12.0
Setting up certs...
Connecting to cluster...
Setting up kubeconfig...
Starting cluster components...
Kubectl is now configured to use the cluster.
===================
WARNING: IT IS RECOMMENDED NOT TO RUN THE NONE DRIVER ON PERSONAL WORKSTATIONS
The 'none' driver will run an insecure kubernetes apiserver as root that may leave the host vulnerable to CSRF attacks
When using the none driver, the kubectl config and credentials generated will be root owned and will appear in the root home directory.
You will need to move the files to the appropriate location and then set the correct permissions. An example of this is below:
sudo mv /root/.kube $HOME/.kube # this will write over any previous configuration
sudo chown -R $USER $HOME/.kube
sudo chgrp -R $USER $HOME/.kube
sudo mv /root/.minikube $HOME/.minikube # this will write over any previous configuration
sudo chown -R $USER $HOME/.minikube
sudo chgrp -R $USER $HOME/.minikube
This can also be done automatically by setting the env var CHANGE_MINIKUBE_NONE_USER=true
Loading cached images from config file.
+ sudo cp /etc/ssl/certs/ca-bundle.crt /etc/ssl/certs/ca-certificates.crt
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY KUBERNETES [00h 00m 58s] ##########
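With --vm-driver=none, minikube runs the cluster components directly on the host as root, so the generated kubeconfig and certificates end up under /root (this job simply keeps using sudo). A sketch of adopting them for a non-root user, following the guidance minikube prints above:

    # sketch following minikube's own notice; $USER/$HOME refer to the non-root user
    export CHANGE_MINIKUBE_NONE_USER=true     # automatic variant (set before 'minikube start')
    # manual variant:
    sudo mv /root/.kube "$HOME/.kube"         && sudo chown -R "$USER" "$HOME/.kube"
    sudo mv /root/.minikube "$HOME/.minikube" && sudo chown -R "$USER" "$HOME/.minikube"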
[workspace] $ /bin/bash /tmp/jenkins4635626498762949358.sh
########## STARTING STAGE: INSTALL KUSTOMIZE ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.F9eJHPMX4m
+ cat
+ chmod +x /tmp/tmp.F9eJHPMX4m
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.F9eJHPMX4m openshiftdevel:/tmp/tmp.F9eJHPMX4m
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.F9eJHPMX4m"'
+ cd /home/origin
+ curl -Lo kustomize https://github.com/kubernetes-sigs/kustomize/releases/download/v2.1.0/kustomize_2.1.0_linux_amd64
100 22.9M 100 22.9M 0 0 37.9M 0 --:--:-- --:--:-- --:--:-- 37.9M
+ chmod u+x kustomize
+ sudo mv kustomize /usr/bin/kustomize
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL KUSTOMIZE [00h 00m 02s] ##########
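kustomize is installed as a single static binary; the deploy stages below use it by rendering a kustomization directory and piping the result to kubectl, e.g.:

    # usage pattern seen in the deploy stages later in this log
    kustomize build config | kubectl apply -f -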
[workspace] $ /bin/bash /tmp/jenkins7872975445483960859.sh
########## STARTING STAGE: INSTALL IMAGEBUILDER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.mPZOqNDcWU
+ cat
+ chmod +x /tmp/tmp.mPZOqNDcWU
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.mPZOqNDcWU openshiftdevel:/tmp/tmp.mPZOqNDcWU
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.mPZOqNDcWU"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
+ sudo mv /data/bin/imagebuilder /usr/bin
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL IMAGEBUILDER [00h 00m 22s] ##########
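imagebuilder is installed GOPATH-style, which with Go 1.12-era tooling drops the binary in $GOPATH/bin:

    # sketch of the install pattern above (module-mode Go would use 'go install ...@version' instead)
    export GOPATH=/data
    go get -u github.com/openshift/imagebuilder/cmd/imagebuilder
    sudo mv "$GOPATH/bin/imagebuilder" /usr/bin/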
[workspace] $ /bin/bash /tmp/jenkins2698717931702222361.sh
########## STARTING STAGE: BUILD KUBEMARK MACHINE CONTROLLERS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.m4qUzCZVZC
+ cat
+ chmod +x /tmp/tmp.m4qUzCZVZC
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.m4qUzCZVZC openshiftdevel:/tmp/tmp.m4qUzCZVZC
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.m4qUzCZVZC"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo make images IMAGE=docker.io/gofed/kubemark-machine-controllers VERSION=v1.0 NO_DOCKER=1
imagebuilder -t "docker.io/gofed/kubemark-machine-controllers:v1.0" -t "docker.io/gofed/kubemark-machine-controllers:latest" ./
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.10 was not found, pulling ...
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark
--> COPY . .
--> RUN go build -o ./machine-controller-manager ./cmd/manager
--> RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager
--> Image docker.io/gofed/base:baseci was not found, pulling ...
--> Extracting
--> FROM docker.io/gofed/base:baseci as 1
--> RUN INSTALL_PKGS=" openssh " && yum install -y $INSTALL_PKGS && rpm -V $INSTALL_PKGS && yum clean all && curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x ./kubectl && mv ./kubectl /bin/kubectl && curl -LO https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 && chmod +x ./jq-linux64 && mv ./jq-linux64 /bin/jq
Loaded plugins: fastestmirror, ovl
Determining fastest mirrors
* base: mirrors.advancedhosters.com
* extras: mirrors.advancedhosters.com
* updates: mirrors.advancedhosters.com
Resolving Dependencies
--> Running transaction check
---> Package openssh.x86_64 0:7.4p1-16.el7 will be installed
--> Processing Dependency: libfipscheck.so.1()(64bit) for package: openssh-7.4p1-16.el7.x86_64
--> Running transaction check
---> Package fipscheck-lib.x86_64 0:1.4.1-6.el7 will be installed
--> Processing Dependency: /usr/bin/fipscheck for package: fipscheck-lib-1.4.1-6.el7.x86_64
--> Running transaction check
---> Package fipscheck.x86_64 0:1.4.1-6.el7 will be installed
--> Finished Dependency Resolution
Dependencies Resolved
================================================================================
 Package             Arch        Version             Repository          Size
================================================================================
Installing:
 openssh             x86_64      7.4p1-16.el7        base               510 k
Installing for dependencies:
 fipscheck           x86_64      1.4.1-6.el7         base                21 k
 fipscheck-lib       x86_64      1.4.1-6.el7         base                11 k

Transaction Summary
================================================================================
Install  1 Package (+2 Dependent packages)

Total download size: 542 k
Installed size: 2.0 M
Downloading packages:
--------------------------------------------------------------------------------
Total 759 kB/s | 542 kB 00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
Installing : fipscheck-1.4.1-6.el7.x86_64 1/3
Installing : fipscheck-lib-1.4.1-6.el7.x86_64 2/3
Installing : openssh-7.4p1-16.el7.x86_64 3/3
Verifying : fipscheck-lib-1.4.1-6.el7.x86_64 1/3
Verifying : fipscheck-1.4.1-6.el7.x86_64 2/3
Verifying : openssh-7.4p1-16.el7.x86_64 3/3
Installed:
openssh.x86_64 0:7.4p1-16.el7
Dependency Installed:
fipscheck.x86_64 0:1.4.1-6.el7 fipscheck-lib.x86_64 0:1.4.1-6.el7
Complete!
Loaded plugins: fastestmirror, ovl
Cleaning repos: base cbs-paas7-openshift-multiarch-el7-build extras updates
Cleaning up list of fastest mirrors
100 40.9M 100 40.9M 0 0 66.7M 0 --:--:-- --:--:-- --:--:-- 66.8M
100 2956k 100 2956k 0 0 10.0M 0 --:--:-- --:--:-- --:--:-- 10.0M
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager /
--> COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager /
--> Committing changes to docker.io/gofed/kubemark-machine-controllers:v1.0 ...
--> Tagged as docker.io/gofed/kubemark-machine-controllers:latest
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD KUBEMARK MACHINE CONTROLLERS [00h 01m 37s] ##########
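The imagebuilder trace above corresponds to a two-stage build: compile in a golang builder image, then copy the binaries into a small runtime image. A sketch of the Dockerfile it implies (the real file lives in the cluster-api-provider-kubemark repo; the openssh/kubectl/jq RUN step is elided here):

    # sketch: write an approximation of the traced Dockerfile and build it
    cat > Dockerfile.sketch <<'EOF'
    FROM registry.svc.ci.openshift.org/openshift/release:golang-1.10 as builder
    WORKDIR /go/src/github.com/openshift/cluster-api-provider-kubemark
    COPY . .
    RUN go build -o ./machine-controller-manager ./cmd/manager
    RUN go build -o ./manager ./vendor/github.com/openshift/cluster-api/cmd/manager

    FROM docker.io/gofed/base:baseci
    COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/manager /
    COPY --from=builder /go/src/github.com/openshift/cluster-api-provider-kubemark/machine-controller-manager /
    EOF
    imagebuilder -f Dockerfile.sketch -t docker.io/gofed/kubemark-machine-controllers:v1.0 .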
[workspace] $ /bin/bash /tmp/jenkins5384870064827320769.sh
########## STARTING STAGE: BUILD CLUSTER AUTOSCALER ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.P2s4wyJuCi
+ cat
+ chmod +x /tmp/tmp.P2s4wyJuCi
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.P2s4wyJuCi openshiftdevel:/tmp/tmp.P2s4wyJuCi
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.P2s4wyJuCi"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/kubernetes-autoscaler
+ sudo imagebuilder -f images/cluster-autoscaler/Dockerfile -t docker.io/openshift/origin-cluster-autoscaler:v4.0 .
--> Image registry.svc.ci.openshift.org/openshift/release:golang-1.12 was not found, pulling ...
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/k8s.io/autoscaler
--> COPY . .
--> RUN go build -o cluster-autoscaler/cluster-autoscaler ./cluster-autoscaler
--> Image registry.svc.ci.openshift.org/openshift/origin-v4.0:base was not found, pulling ...
--> Extracting
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /usr/bin/
--> CMD /usr/bin/cluster-autoscaler
--> LABEL summary="Cluster Autoscaler for OpenShift and Kubernetes"
--> Committing changes to docker.io/openshift/origin-cluster-autoscaler:v4.0 ...
--> Done
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: BUILD CLUSTER AUTOSCALER [00h 02m 22s] ##########
[workspace] $ /bin/bash /tmp/jenkins3961425016706187314.sh
########## STARTING STAGE: DEPLOY MACHINE API OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.VcEYgLp21m
+ cat
+ chmod +x /tmp/tmp.VcEYgLp21m
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.VcEYgLp21m openshiftdevel:/tmp/tmp.VcEYgLp21m
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.VcEYgLp21m"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/machine-api-operator
+ make build
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh machine-api-operator
Unable to find image 'golang:1.12' locally
Trying to pull repository registry.access.redhat.com/golang ...
Pulling repository registry.access.redhat.com/golang
Trying to pull repository docker.io/library/golang ...
1.12: Pulling from docker.io/library/golang
Digest: sha256:584daefca21d7b8f0702e70bfe6a31c3a6def12ed4145a8d6fe78c205f124f1e
Status: Downloaded newer image for docker.io/golang:1.12
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-526-g6d1a51d5)
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-526-g6d1a51d5)
docker run --rm -v "/data/src/github.com/openshift/machine-api-operator":/go/src/github.com/openshift/machine-api-operator:Z -w /go/src/github.com/openshift/machine-api-operator golang:1.12 ./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-526-g6d1a51d5)
+ sudo imagebuilder -t docker.io/openshift/origin-machine-api-operator:v4.0.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/machine-api-operator
--> COPY . .
--> RUN NO_DOCKER=1 make build
./hack/go-build.sh machine-api-operator
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-api-operator (v0.1.0-526-g6d1a51d5)
./hack/go-build.sh nodelink-controller
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/nodelink-controller (v0.1.0-526-g6d1a51d5)
./hack/go-build.sh machine-healthcheck
Using version from git...
Building github.com/openshift/machine-api-operator/cmd/machine-healthcheck (v0.1.0-526-g6d1a51d5)
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller .
--> COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-healthcheck .
--> LABEL io.openshift.release.operator true
--> Committing changes to docker.io/openshift/origin-machine-api-operator:v4.0.0 ...
--> Done
+ sudo make deploy-kubemark
kustomize build config | kubectl apply -f -
namespace "kubemark-actuator" created
serviceaccount "kubemark" created
clusterrole.rbac.authorization.k8s.io "kubemark-actuator-role" created
clusterrolebinding.rbac.authorization.k8s.io "kubemark-actuator-rolebinding" created
configmap "deleteunreadynodes" created
deployment.apps "machineapi-kubemark-controllers" created
kustomize build | kubectl apply -f -
namespace "openshift-machine-api" created
customresourcedefinition.apiextensions.k8s.io "clusteroperators.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "featuregates.config.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinedisruptionbudgets.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinehealthchecks.healthchecking.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machines.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machinesets.machine.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "prometheusrules.monitoring.coreos.com" created
customresourcedefinition.apiextensions.k8s.io "servicemonitors.monitoring.coreos.com" created
serviceaccount "machine-api-controllers" created
serviceaccount "machine-api-operator" created
role.rbac.authorization.k8s.io "machine-api-controllers" created
role.rbac.authorization.k8s.io "machine-api-operator" created
role.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrole.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrole.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
rolebinding.rbac.authorization.k8s.io "machine-api-operator" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-machine-api-operator" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-controllers" created
clusterrolebinding.rbac.authorization.k8s.io "machine-api-operator" created
configmap "machine-api-operator-images" created
service "machine-api-operator" created
deployment.apps "machine-api-operator" created
clusteroperator.config.openshift.io "machine-api" created
kubectl apply -f config/kubemark-config-infra.yaml
customresourcedefinition.apiextensions.k8s.io "infrastructures.config.openshift.io" created
infrastructure.config.openshift.io "cluster" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY MACHINE API OPERATOR [00h 03m 36s] ##########
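After the kustomize output is applied, the operator and controller Deployments come up asynchronously; the e2e suite later polls them for availability. A hedged sketch of checking the same thing by hand, using the namespaces and deployment names from the apply output above:

    # sketch: wait for the deployments created above to roll out
    sudo kubectl -n openshift-machine-api rollout status deployment/machine-api-operator
    sudo kubectl -n kubemark-actuator rollout status deployment/machineapi-kubemark-controllers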
[workspace] $ /bin/bash /tmp/jenkins6161104429701592219.sh
########## STARTING STAGE: DEPLOY CLUSTER AUTOSCALER OPERATOR ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.MX5xcQ1Enl
+ cat
+ chmod +x /tmp/tmp.MX5xcQ1Enl
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.MX5xcQ1Enl openshiftdevel:/tmp/tmp.MX5xcQ1Enl
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.MX5xcQ1Enl"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-autoscaler-operator/
+ sudo imagebuilder -t quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 .
--> FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 as builder
--> WORKDIR /go/src/github.com/openshift/cluster-autoscaler-operator
--> COPY . .
--> ENV NO_DOCKER=1
--> ENV BUILD_DEST=/go/bin/cluster-autoscaler-operator
--> RUN unset VERSION && make build
go build -ldflags "-X github.com/openshift/cluster-autoscaler-operator/pkg/version.Raw=v0.0.0-213-g045aea4" -o "/go/bin/cluster-autoscaler-operator" "github.com/openshift/cluster-autoscaler-operator/cmd/manager"
--> FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base as 1
--> COPY --from=builder /go/bin/cluster-autoscaler-operator /usr/bin/
--> COPY --from=builder /go/src/github.com/openshift/cluster-autoscaler-operator/install /manifests
--> CMD ["/usr/bin/cluster-autoscaler-operator"]
--> LABEL io.openshift.release.operator true
--> Committing changes to quay.io/openshift/origin-cluster-autoscaler-operator:v4.0 ...
--> Done
+ kustomize build
+ sudo kubectl apply -f -
customresourcedefinition.apiextensions.k8s.io "clusterautoscalers.autoscaling.openshift.io" created
customresourcedefinition.apiextensions.k8s.io "machineautoscalers.autoscaling.openshift.io" created
serviceaccount "cluster-autoscaler" created
serviceaccount "cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler" created
role.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
role.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrole.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
rolebinding.rbac.authorization.k8s.io "prometheus-k8s-cluster-autoscaler-operator" created
rolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler" created
clusterrolebinding.rbac.authorization.k8s.io "cluster-autoscaler-operator" created
configmap "cluster-autoscaler-operator-ca" created
secret "cluster-autoscaler-operator-cert" created
service "cluster-autoscaler-operator" created
deployment.apps "cluster-autoscaler-operator" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER AUTOSCALER OPERATOR [00h 00m 46s] ##########
[workspace] $ /bin/bash /tmp/jenkins6141386697928010368.sh
########## STARTING STAGE: DEPLOY CLUSTER RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.noKQS6YIDK
+ cat
+ chmod +x /tmp/tmp.noKQS6YIDK
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.noKQS6YIDK openshiftdevel:/tmp/tmp.noKQS6YIDK
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.noKQS6YIDK"'
+ cd /home/origin
+ export GOPATH=/data
+ GOPATH=/data
+ cd /data/src/github.com/openshift/cluster-api-provider-kubemark
+ sudo kubectl apply -f examples/machine-set.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset" created
+ sudo kubectl apply -f examples/static-machine.yaml
machine.machine.openshift.io "minikube-static-machine" created
+ sudo kubectl apply -f examples/worker-machinesets.yaml
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-red" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-green" created
machineset.machine.openshift.io "kubemark-actuator-testing-machineset-blue" created
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPLOY CLUSTER RESOURCES [00h 00m 02s] ##########
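The example manifests create one static machine plus four MachineSets for the kubemark actuator. A quick way to confirm what was created (resource names are taken from the apply output above; namespaces vary by manifest):

    # sketch: inspect the machine.openshift.io resources just created
    sudo kubectl get machinesets --all-namespaces
    sudo kubectl get machines --all-namespaces
    sudo kubectl get nodes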
[workspace] $ /bin/bash /tmp/jenkins3850359014073959748.sh
########## STARTING STAGE: INSTALL GO 1.10.1 ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.xutgEMoO0Z
+ cat
+ chmod +x /tmp/tmp.xutgEMoO0Z
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.xutgEMoO0Z openshiftdevel:/tmp/tmp.xutgEMoO0Z
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.xutgEMoO0Z"'
+ cd /home/origin
+ mkdir -p /home/origin/bin
+ curl -sL -o /home/origin/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/master/gimme
+ chmod +x /home/origin/bin/gimme
+ gimme 1.10.1
unset GOOS;
unset GOARCH;
export GOROOT='/home/origin/.gimme/versions/go1.10.1.linux.amd64';
export PATH="/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:${PATH}";
go version >&2;
export GIMME_ENV="/home/origin/.gimme/envs/go1.10.1.env"
+ source /home/origin/.gimme/envs/go1.10.1.env
++ unset GOOS
++ unset GOARCH
++ export GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ GOROOT=/home/origin/.gimme/versions/go1.10.1.linux.amd64
++ export PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ PATH=/home/origin/.gimme/versions/go1.10.1.linux.amd64/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin
++ go version
go version go1.10.1 linux/amd64
+ sudo cp /home/origin/.gimme/versions/go1.10.1.linux.amd64/bin/go /bin/go
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL GO 1.10.1 [00h 00m 07s] ##########
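gimme resolves, downloads, and unpacks a Go toolchain, then prints shell exports; the stage above captures them in an env file and sources it. The more common one-liner form of the same thing:

    # typical gimme usage (sketch): eval the environment it prints
    eval "$(gimme 1.10.1)"
    go version    # go1.10.1 linux/amd64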
[workspace] $ /bin/bash /tmp/jenkins1990061321328044952.sh
########## STARTING STAGE: RUN E2E TESTS ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.FHlRwRMQYA
+ cat
+ chmod +x /tmp/tmp.FHlRwRMQYA
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.FHlRwRMQYA openshiftdevel:/tmp/tmp.FHlRwRMQYA
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 14400 /tmp/tmp.FHlRwRMQYA"'
+ cd /home/origin
+ set +x
go version go1.10.1 linux/amd64
./hack/e2e.sh test-e2e
make[1]: Entering directory `/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg'
# Run operator tests first to preserve logs for troubleshooting test
# failures and flakes.
# Feature:Operator tests remove deployments, thus losing all the logs
# previously acquired.
hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.focus "Feature:Operators" -ginkgo.failFast
=== RUN TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1567782566
Will run 7 of 16 specs
[Feature:Operators] Cluster autoscaler operator deployment should
be available
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:79
I0906 15:09:26.652799 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:09:26.689216 5009 deloyment.go:58] Deployment "cluster-autoscaler-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•SSSSSSS
------------------------------
[Feature:Operators] Cluster autoscaler cluster operator status should
be available
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:90
I0906 15:09:26.689334 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Cluster autoscaler operator should
reject invalid ClusterAutoscaler resources early via webhook
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:33
I0906 15:09:26.706063 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Cluster autoscaler operator should
reject invalid MachineAutoscaler resources early via webhook
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/cluster-autoscaler-operator.go:49
I0906 15:09:26.733424 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Machine API cluster operator status should
be available
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:53
I0906 15:09:26.758277 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
•
------------------------------
[Feature:Operators] Machine API operator deployment should
be available
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:18
I0906 15:09:26.770987 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:09:26.786446 5009 deloyment.go:58] Deployment "machine-api-operator" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•
------------------------------
[Feature:Operators] Machine API operator deployment should
reconcile controllers deployment
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/operators/machine-api-operator.go:25
I0906 15:09:26.786512 5009 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking deployment "machine-api-controllers" is available
I0906 15:09:26.799549 5009 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
STEP: deleting deployment "machine-api-controllers"
STEP: checking deployment "machine-api-controllers" is available again
E0906 15:09:26.807416 5009 deloyment.go:25] Error querying api for Deployment object "machine-api-controllers": deployments.apps "machine-api-controllers" not found, retrying...
E0906 15:09:27.810060 5009 deloyment.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1)
E0906 15:09:28.812152 5009 deloyment.go:55] Deployment "machine-api-controllers" is not available. Status: (replicas: 1, updated: 1, ready: 0, available: 0, unavailable: 1)
I0906 15:09:29.812670 5009 deloyment.go:58] Deployment "machine-api-controllers" is available. Status: (replicas: 1, updated: 1, ready: 1, available: 1, unavailable: 0)
•SS
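The deloyment.go retry lines above suggest a one-second poll that tolerates a missing object while the operator recreates it (NotFound, then unavailable, then available again within three seconds). A hedged sketch of such a helper with recent client-go; the suite's actual implementation may differ:

```go
package e2e

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDeploymentAvailable polls once per second, treats NotFound as
// "keep waiting" (the operator is expected to recreate the deployment),
// and succeeds once every desired replica is updated, ready and available.
func waitForDeploymentAvailable(c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // the "not found, retrying..." case in the log above
		}
		if err != nil {
			return false, err
		}
		want := int32(1)
		if d.Spec.Replicas != nil {
			want = *d.Spec.Replicas
		}
		return d.Status.UpdatedReplicas == want &&
			d.Status.ReadyReplicas == want &&
			d.Status.AvailableReplicas == want &&
			d.Status.UnavailableReplicas == 0, nil
	})
}
```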
Ran 7 of 16 Specs in 3.160 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (3.16s)
PASS
ok github.com/openshift/cluster-api-actuator-pkg/pkg/e2e 3.213s
hack/ci-integration.sh -ginkgo.v -ginkgo.noColor=true -ginkgo.skip "Feature:Operators|TechPreview" -ginkgo.failFast -ginkgo.seed=1
=== RUN TestE2E
Running Suite: Machine Suite
============================
Random Seed: 1
Will run 7 of 16 specs
SSSSSSSS
------------------------------
[Feature:Machines] Autoscaler should
scale up and down
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234
I0906 15:09:33.015012 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:09:33.021567 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:09:33.052537 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Getting existing machinesets
STEP: Getting existing machines
STEP: Getting existing nodes
I0906 15:09:33.071035 5527 autoscaler.go:286] Have 4 existing machinesets
I0906 15:09:33.071058 5527 autoscaler.go:287] Have 5 existing machines
I0906 15:09:33.071069 5527 autoscaler.go:288] Have 5 existing nodes
STEP: Creating 3 transient machinesets
STEP: [15m0s remaining] Waiting for nodes to be Ready in 3 transient machinesets
E0906 15:09:33.099071 5527 utils.go:157] Machine "e2e-5508c-w-0-mlv9z" has no NodeRef
STEP: [14m57s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0906 15:09:36.121878 5527 utils.go:165] Machine "e2e-5508c-w-0-mlv9z" is backing node "00db6f98-a3ac-4b9d-9b06-baeefad63df4"
I0906 15:09:36.121907 5527 utils.go:149] MachineSet "e2e-5508c-w-0" has 1 node
E0906 15:09:36.131240 5527 utils.go:157] Machine "e2e-5508c-w-1-xxftr" has no NodeRef
STEP: [14m54s remaining] Waiting for nodes to be Ready in 3 transient machinesets
I0906 15:09:39.137754 5527 utils.go:165] Machine "e2e-5508c-w-0-mlv9z" is backing node "00db6f98-a3ac-4b9d-9b06-baeefad63df4"
I0906 15:09:39.137783 5527 utils.go:149] MachineSet "e2e-5508c-w-0" has 1 node
I0906 15:09:39.143301 5527 utils.go:165] Machine "e2e-5508c-w-1-xxftr" is backing node "7ab053ab-5975-4dd7-a60f-6db3990be26f"
I0906 15:09:39.143323 5527 utils.go:149] MachineSet "e2e-5508c-w-1" has 1 node
I0906 15:09:39.148525 5527 utils.go:165] Machine "e2e-5508c-w-2-wj4jh" is backing node "66cf1356-1533-4fee-8ea0-24d40b6aef5f"
I0906 15:09:39.148547 5527 utils.go:149] MachineSet "e2e-5508c-w-2" has 1 node
I0906 15:09:39.148555 5527 utils.go:177] Node "00db6f98-a3ac-4b9d-9b06-baeefad63df4" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:09:39.148635 5527 utils.go:177] Node "7ab053ab-5975-4dd7-a60f-6db3990be26f" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:09:37 +0000 UTC 2019-09-06 15:09:35 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:09:39.148659 5527 utils.go:177] Node "66cf1356-1533-4fee-8ea0-24d40b6aef5f" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:09:38 +0000 UTC 2019-09-06 15:09:36 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:09:38 +0000 UTC 2019-09-06 15:09:36 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:09:38 +0000 UTC 2019-09-06 15:09:36 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:09:38 +0000 UTC 2019-09-06 15:09:36 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:09:38 +0000 UTC 2019-09-06 15:09:36 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: Getting nodes
STEP: Creating 3 machineautoscalers
I0906 15:09:39.151562 5527 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-5508c-w-0 - min:1, max:2
I0906 15:09:39.158479 5527 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-5508c-w-1 - min:1, max:2
I0906 15:09:39.162577 5527 autoscaler.go:340] Create MachineAutoscaler backed by MachineSet kube-system/e2e-5508c-w-2 - min:1, max:2
STEP: Creating ClusterAutoscaler configured with maxNodesTotal:10
STEP: Deriving Memory capacity from machine "kubemark-actuator-testing-machineset"
I0906 15:09:39.276486 5527 autoscaler.go:377] Memory capacity of worker node "359b0676-397f-402c-b209-ed17aa0a216c" is 3840Mi
STEP: Creating scale-out workload: jobs: 11, memory: 2818572300
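This sizing is what forces a scale-out: each job requests about 70% of a worker's memory, so no node can ever run two of them. A small sketch of that arithmetic, inferred from the logged numbers rather than taken from the suite (names are illustrative):

```go
package e2e

// oneJobPerNodeRequest sizes each job's memory request at ~70% of a
// worker's capacity so two jobs can never share a node. This ratio is an
// assumption read off the numbers in the log, not the suite's own code.
func oneJobPerNodeRequest(nodeCapacityBytes int64) int64 {
	return nodeCapacityBytes * 7 / 10
}

// 3840Mi = 3840 * 1024 * 1024 = 4026531840 bytes, and 70% of that is
// 2818572288 -- within rounding of the 2818572300-byte request logged
// above. Eleven such jobs would need eleven nodes, but the
// ClusterAutoscaler is capped at maxNodesTotal:10, so the test expects
// both ScaledUpGroup events and a MaxNodesTotalReached event.
```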
I0906 15:09:39.304637 5527 autoscaler.go:399] [15m0s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:09:40.379733 5527 autoscaler.go:361] cluster-autoscaler: cluster-autoscaler-default-598c649f66-tgmls became leader
I0906 15:09:42.304866 5527 autoscaler.go:399] [14m57s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:09:45.305082 5527 autoscaler.go:399] [14m54s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:09:48.305196 5527 autoscaler.go:399] [14m51s remaining] Expecting 2 "ScaledUpGroup" events; observed 0
I0906 15:09:50.515652 5527 autoscaler.go:361] cluster-autoscaler-status: Max total nodes in cluster reached: 10
I0906 15:09:50.518206 5527 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-5508c-w-1 size to 2
I0906 15:09:50.523617 5527 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-5508c-w-1 size set to 2
I0906 15:09:50.526317 5527 autoscaler.go:361] e2e-autoscaler-workload-k7d25: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.532696 5527 autoscaler.go:361] e2e-autoscaler-workload-7745h: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.538502 5527 autoscaler.go:361] e2e-autoscaler-workload-x9srw: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.544645 5527 autoscaler.go:361] e2e-autoscaler-workload-n5xxj: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.552081 5527 autoscaler.go:361] e2e-autoscaler-workload-2h24c: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.563329 5527 autoscaler.go:361] e2e-autoscaler-workload-hl5bk: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.570678 5527 autoscaler.go:361] e2e-autoscaler-workload-2lbwq: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:50.715740 5527 autoscaler.go:361] e2e-autoscaler-workload-cks94: pod triggered scale-up: [{kube-system/e2e-5508c-w-1 1->2 (max: 2)}]
I0906 15:09:51.305407 5527 autoscaler.go:399] [14m48s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:09:54.305658 5527 autoscaler.go:399] [14m45s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:09:57.306516 5527 autoscaler.go:399] [14m42s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:10:00.306733 5527 autoscaler.go:399] [14m39s remaining] Expecting 2 "ScaledUpGroup" events; observed 1
I0906 15:10:00.548986 5527 autoscaler.go:361] cluster-autoscaler-status: Scale-up: setting group kube-system/e2e-5508c-w-0 size to 2
I0906 15:10:00.553746 5527 autoscaler.go:361] e2e-autoscaler-workload-cks94: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.560145 5527 autoscaler.go:361] cluster-autoscaler-status: Scale-up: group kube-system/e2e-5508c-w-0 size set to 2
I0906 15:10:00.562190 5527 autoscaler.go:361] e2e-autoscaler-workload-7745h: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.564159 5527 autoscaler.go:361] e2e-autoscaler-workload-n5xxj: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.570030 5527 autoscaler.go:361] e2e-autoscaler-workload-k7d25: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.578727 5527 autoscaler.go:361] e2e-autoscaler-workload-x9srw: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.587666 5527 autoscaler.go:361] e2e-autoscaler-workload-hl5bk: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:00.591015 5527 autoscaler.go:361] e2e-autoscaler-workload-2lbwq: pod triggered scale-up: [{kube-system/e2e-5508c-w-0 1->2 (max: 2)}]
I0906 15:10:03.306991 5527 autoscaler.go:399] [14m36s remaining] Expecting 2 "ScaledUpGroup" events; observed 2
I0906 15:10:03.307900 5527 autoscaler.go:414] [1m0s remaining] Waiting for cluster-autoscaler to generate a "MaxNodesTotalReached" event; observed 1
I0906 15:10:03.307930 5527 autoscaler.go:422] [1m0s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:06.308135 5527 autoscaler.go:422] [57s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:09.308419 5527 autoscaler.go:422] [54s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:12.308836 5527 autoscaler.go:422] [51s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:15.309087 5527 autoscaler.go:422] [48s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:18.309347 5527 autoscaler.go:422] [45s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:21.309578 5527 autoscaler.go:422] [42s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:24.309822 5527 autoscaler.go:422] [39s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:27.310774 5527 autoscaler.go:422] [36s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:30.310996 5527 autoscaler.go:422] [33s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:33.311243 5527 autoscaler.go:422] [30s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:36.311532 5527 autoscaler.go:422] [27s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:39.311794 5527 autoscaler.go:422] [24s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:42.312009 5527 autoscaler.go:422] [21s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:45.312269 5527 autoscaler.go:422] [18s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:48.312537 5527 autoscaler.go:422] [15s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:51.312789 5527 autoscaler.go:422] [12s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:54.313064 5527 autoscaler.go:422] [9s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:10:57.313292 5527 autoscaler.go:422] [6s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
I0906 15:11:00.313459 5527 autoscaler.go:422] [3s remaining] At max cluster size and expecting no more "ScaledUpGroup" events; currently have 2, max=2
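The countdown lines above poll the observed number of "ScaledUpGroup" events until it matches the expected two, then hold at max cluster size for a minute to confirm no further scale-ups arrive. An assumed shape for that event count with client-go (the suite's own helper lives in autoscaler.go):

```go
package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// countEventsByReason lists events in the namespace and counts those whose
// Reason matches, e.g. "ScaledUpGroup" or "MaxNodesTotalReached".
func countEventsByReason(c kubernetes.Interface, ns, reason string) (int, error) {
	evs, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	n := 0
	for _, ev := range evs.Items {
		if ev.Reason == reason {
			n++
		}
	}
	return n, nil
}
```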
STEP: Deleting workload
I0906 15:11:03.308151 5527 autoscaler.go:249] [cleanup] "e2e-autoscaler-workload" (*v1.Job)
I0906 15:11:03.313341 5527 autoscaler.go:434] [15m0s remaining] Expecting 2 "ScaleDownEmpty" events; observed 2
I0906 15:11:03.348034 5527 autoscaler.go:445] workload POD "e2e-autoscaler-workload-2h24c" still present
I0906 15:11:03.348073 5527 autoscaler.go:249] [cleanup] "default" (*v1.ClusterAutoscaler)
I0906 15:11:03.452490 5527 autoscaler.go:465] Waiting for cluster-autoscaler POD "cluster-autoscaler-default-598c649f66-tgmls" to disappear
STEP: Scaling transient machinesets to zero
I0906 15:11:03.452550 5527 autoscaler.go:474] Scaling transient machineset "e2e-5508c-w-0" to zero
I0906 15:11:03.458112 5527 autoscaler.go:474] Scaling transient machineset "e2e-5508c-w-1" to zero
I0906 15:11:03.466094 5527 autoscaler.go:474] Scaling transient machineset "e2e-5508c-w-2" to zero
STEP: Waiting for scaled up nodes to be deleted
I0906 15:11:03.522000 5527 autoscaler.go:491] [15m0s remaining] Waiting for cluster to reach original node count of 5; currently have 10
I0906 15:11:06.526461 5527 autoscaler.go:491] [14m57s remaining] Waiting for cluster to reach original node count of 5; currently have 8
I0906 15:11:09.530138 5527 autoscaler.go:491] [14m54s remaining] Waiting for cluster to reach original node count of 5; currently have 5
STEP: Waiting for scaled up machines to be deleted
I0906 15:11:09.533584 5527 autoscaler.go:501] [15m0s remaining] Waiting for cluster to reach original machine count of 5; currently have 5
I0906 15:11:09.533616 5527 autoscaler.go:249] [cleanup] "autoscale-e2e-5508c-w-0mtzfn" (*v1beta1.MachineAutoscaler)
I0906 15:11:09.536918 5527 autoscaler.go:249] [cleanup] "autoscale-e2e-5508c-w-1zmp8d" (*v1beta1.MachineAutoscaler)
I0906 15:11:09.540193 5527 autoscaler.go:249] [cleanup] "autoscale-e2e-5508c-w-2z6hhv" (*v1beta1.MachineAutoscaler)
I0906 15:11:09.545457 5527 autoscaler.go:249] [cleanup] "e2e-5508c-w-0" (*v1beta1.MachineSet)
I0906 15:11:09.549133 5527 autoscaler.go:249] [cleanup] "e2e-5508c-w-1" (*v1beta1.MachineSet)
I0906 15:11:09.554079 5527 autoscaler.go:249] [cleanup] "e2e-5508c-w-2" (*v1beta1.MachineSet)
• [SLOW TEST:96.546 seconds]
[Feature:Machines] Autoscaler should
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:233
scale up and down
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go:234
------------------------------
S
------------------------------
[Feature:Machines] Managed cluster should
have machines linked with nodes
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:136
I0906 15:11:09.561108 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
I0906 15:11:09.579106 5527 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:11:09.579139 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-6pt7l" is linked to node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:11:09.579152 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-hpgct" is linked to node "8d76d38d-5446-4aef-802c-ad0fcfdb4546"
I0906 15:11:09.579160 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-nr9lx" is linked to node "359b0676-397f-402c-b209-ed17aa0a216c"
I0906 15:11:09.579169 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-s4l9g" is linked to node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b"
I0906 15:11:09.579185 5527 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
•
------------------------------
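The link asserted in the spec above is the Machine's status.nodeRef; while a machine is still booting the reference is nil, which is exactly the "has no NodeRef" error seen elsewhere in this run. A trivial sketch of the check, with the field decoded as a core ObjectReference (layout assumed):

```go
package e2e

import corev1 "k8s.io/api/core/v1"

// linkedNodeName mirrors the machine-to-node check: a Machine's
// status.nodeRef must name an existing Node. A nil nodeRef means the
// machine is not yet backing any node.
func linkedNodeName(nodeRef *corev1.ObjectReference) (string, bool) {
	if nodeRef == nil {
		return "", false
	}
	return nodeRef.Name, true
}
```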
[Feature:Machines] Managed cluster should
have ability to additively reconcile taints from machine to nodes
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:145
I0906 15:11:09.579237 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: getting machine "kubemark-actuator-testing-machineset-6pt7l"
I0906 15:11:09.598496 5527 utils.go:165] Machine "kubemark-actuator-testing-machineset-6pt7l" is backing node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
STEP: getting the backed node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
STEP: updating node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab" with taint: {not-from-machine true NoSchedule <nil>}
STEP: updating machine "kubemark-actuator-testing-machineset-6pt7l" with taint: {from-machine-8e92327e-d0b8-11e9-978c-0a445740e986 true NoSchedule <nil>}
I0906 15:11:09.607997 5527 infra.go:184] Getting node from machine again for verification of taints
I0906 15:11:09.611944 5527 utils.go:165] Machine "kubemark-actuator-testing-machineset-6pt7l" is backing node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:11:09.611980 5527 infra.go:194] Expected : map[not-from-machine:{} from-machine-8e92327e-d0b8-11e9-978c-0a445740e986:{}], observed map[kubemark:{} not-from-machine:{} from-machine-8e92327e-d0b8-11e9-978c-0a445740e986:{}] , difference map[],
STEP: Getting the latest version of the original machine
STEP: Setting back the original machine taints
STEP: Getting the latest version of the node
I0906 15:11:09.625610 5527 utils.go:165] Machine "kubemark-actuator-testing-machineset-6pt7l" is backing node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
STEP: Setting back the original node taints
•
------------------------------
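The "Expected : ... observed ... difference map[]" line above encodes an additive contract: every taint set on the machine must appear on the node, while node-local extras such as the kubemark taint are tolerated. A minimal sketch of that set difference (illustrative names, not the suite's code):

```go
package e2e

import corev1 "k8s.io/api/core/v1"

// missingTaints returns the expected taint keys that were not observed on
// the node; the reconciliation test passes when this difference is empty,
// regardless of extra observed taints.
func missingTaints(expected, observed []corev1.Taint) map[string]struct{} {
	have := map[string]struct{}{}
	for _, t := range observed {
		have[t.Key] = struct{}{}
	}
	missing := map[string]struct{}{}
	for _, t := range expected {
		if _, ok := have[t.Key]; !ok {
			missing[t.Key] = struct{}{}
		}
	}
	return missing
}
```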
[Feature:Machines] Managed cluster should
recover from deleted worker machines
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
I0906 15:11:09.629879 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking initial cluster state
I0906 15:11:09.657202 5527 utils.go:87] Cluster size is 5 nodes
I0906 15:11:09.657230 5527 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:11:09.661265 5527 utils.go:99] MachineSet "e2e-5508c-w-0" replicas 0. Ready: 0, available 0
I0906 15:11:09.661290 5527 utils.go:99] MachineSet "e2e-5508c-w-1" replicas 0. Ready: 0, available 0
I0906 15:11:09.661299 5527 utils.go:99] MachineSet "e2e-5508c-w-2" replicas 0. Ready: 0, available 0
I0906 15:11:09.661307 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:11:09.661316 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:11:09.661325 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:11:09.661334 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:11:09.664335 5527 utils.go:231] Node "359b0676-397f-402c-b209-ed17aa0a216c". Ready: true. Unschedulable: false
I0906 15:11:09.664360 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:11:09.664371 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:11:09.664376 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:11:09.664382 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:11:09.667091 5527 utils.go:87] Cluster size is 5 nodes
I0906 15:11:09.667108 5527 utils.go:257] waiting for all nodes to be ready
I0906 15:11:09.670260 5527 utils.go:262] waiting for all nodes to be schedulable
I0906 15:11:09.674411 5527 utils.go:290] [remaining 1m0s] Node "359b0676-397f-402c-b209-ed17aa0a216c" is schedulable
I0906 15:11:09.674440 5527 utils.go:290] [remaining 1m0s] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab" is schedulable
I0906 15:11:09.674450 5527 utils.go:290] [remaining 1m0s] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546" is schedulable
I0906 15:11:09.674457 5527 utils.go:290] [remaining 1m0s] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b" is schedulable
I0906 15:11:09.674463 5527 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:11:09.674471 5527 utils.go:267] waiting for each node to be backed by a machine
I0906 15:11:09.684919 5527 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:11:09.684955 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-6pt7l" is linked to node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:11:09.684970 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-hpgct" is linked to node "8d76d38d-5446-4aef-802c-ad0fcfdb4546"
I0906 15:11:09.684984 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-nr9lx" is linked to node "359b0676-397f-402c-b209-ed17aa0a216c"
I0906 15:11:09.684997 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-s4l9g" is linked to node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b"
I0906 15:11:09.685015 5527 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
STEP: getting worker node
STEP: deleting machine object "kubemark-actuator-testing-machineset-green-nr9lx"
STEP: waiting for node object "359b0676-397f-402c-b209-ed17aa0a216c" to go away
I0906 15:11:09.699018 5527 infra.go:255] Node "359b0676-397f-402c-b209-ed17aa0a216c" still exists. Node conditions are: [{OutOfDisk False 2019-09-06 15:11:09 +0000 UTC 2019-09-06 15:08:33 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:11:09 +0000 UTC 2019-09-06 15:08:33 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:11:09 +0000 UTC 2019-09-06 15:08:33 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:11:09 +0000 UTC 2019-09-06 15:08:33 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:11:09 +0000 UTC 2019-09-06 15:08:33 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: waiting for new node object to come up
I0906 15:11:14.703992 5527 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:11:14.707665 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:11:14.707687 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:11:14.707694 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:11:14.707699 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:11:14.710430 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:11:14.710449 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:11:14.710454 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:11:14.710459 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:11:14.710468 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:11:14.713015 5527 utils.go:87] Cluster size is 5 nodes
I0906 15:11:14.713041 5527 utils.go:257] waiting for all nodes to be ready
I0906 15:11:14.715847 5527 utils.go:262] waiting for all nodes to be schedulable
I0906 15:11:14.718805 5527 utils.go:290] [remaining 1m0s] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab" is schedulable
I0906 15:11:14.718828 5527 utils.go:290] [remaining 1m0s] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546" is schedulable
I0906 15:11:14.718835 5527 utils.go:290] [remaining 1m0s] Node "b3408843-b44c-4857-ab9d-3b13ab158aea" is schedulable
I0906 15:11:14.718842 5527 utils.go:290] [remaining 1m0s] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b" is schedulable
I0906 15:11:14.718862 5527 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:11:14.718868 5527 utils.go:267] waiting for each node to be backed by a machine
I0906 15:11:14.724556 5527 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:11:14.724583 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-6pt7l" is linked to node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:11:14.724594 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-hpgct" is linked to node "8d76d38d-5446-4aef-802c-ad0fcfdb4546"
I0906 15:11:14.724602 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-scthk" is linked to node "b3408843-b44c-4857-ab9d-3b13ab158aea"
I0906 15:11:14.724613 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-s4l9g" is linked to node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b"
I0906 15:11:14.724634 5527 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
• [SLOW TEST:5.095 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
recover from deleted worker machines
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:220
------------------------------
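Both the delete-and-recover flow above and the scaling specs below lean on one primitive: counting Ready nodes until the cluster returns to its expected size. An illustrative version with client-go (the suite's own variant also checks schedulability and machine backing):

```go
package e2e

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// readyNodeCount counts only the nodes whose Ready condition is True,
// matching the "Cluster size is N nodes" lines in the log.
func readyNodeCount(c kubernetes.Interface) (int, error) {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	n := 0
	for _, node := range nodes.Items {
		for _, cond := range node.Status.Conditions {
			if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue {
				n++
				break
			}
		}
	}
	return n, nil
}
```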
[Feature:Machines] Managed cluster should
grow and decrease when scaling different machineSets simultaneously
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
I0906 15:11:14.724720 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0906 15:11:14.740937 5527 utils.go:87] Cluster size is 5 nodes
STEP: getting worker machineSets
I0906 15:11:14.743851 5527 infra.go:297] Creating transient MachineSet "e2e-91a2d-w-0"
I0906 15:11:14.748839 5527 infra.go:297] Creating transient MachineSet "e2e-91a2d-w-1"
STEP: scaling "e2e-91a2d-w-0" from 0 to 2 replicas
I0906 15:11:14.752871 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-91a2d-w-1" from 0 to 2 replicas
I0906 15:11:14.772375 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
E0906 15:11:14.812124 5527 utils.go:157] Machine "e2e-91a2d-w-0-bcdrm" has no NodeRef
I0906 15:11:19.829884 5527 utils.go:165] Machine "e2e-91a2d-w-0-bcdrm" is backing node "41e2bf6d-a04c-4354-afea-7e711d38300e"
I0906 15:11:19.838522 5527 utils.go:165] Machine "e2e-91a2d-w-0-v2vd5" is backing node "a18bf460-d110-4af6-91a0-4af7a5c1fe76"
I0906 15:11:19.838545 5527 utils.go:149] MachineSet "e2e-91a2d-w-0" has 2 nodes
E0906 15:11:19.852028 5527 utils.go:157] Machine "e2e-91a2d-w-1-kxg8f" has no NodeRef
I0906 15:11:24.860019 5527 utils.go:165] Machine "e2e-91a2d-w-0-bcdrm" is backing node "41e2bf6d-a04c-4354-afea-7e711d38300e"
I0906 15:11:24.862527 5527 utils.go:165] Machine "e2e-91a2d-w-0-v2vd5" is backing node "a18bf460-d110-4af6-91a0-4af7a5c1fe76"
I0906 15:11:24.862548 5527 utils.go:149] MachineSet "e2e-91a2d-w-0" has 2 nodes
I0906 15:11:24.868337 5527 utils.go:165] Machine "e2e-91a2d-w-1-kxg8f" is backing node "86eff62d-6aee-4907-b3a3-b0af551e243b"
I0906 15:11:24.870121 5527 utils.go:165] Machine "e2e-91a2d-w-1-z5zn4" is backing node "f94e2b84-7660-436d-b933-ce06e9220145"
I0906 15:11:24.870145 5527 utils.go:149] MachineSet "e2e-91a2d-w-1" has 2 nodes
I0906 15:11:24.870156 5527 utils.go:177] Node "41e2bf6d-a04c-4354-afea-7e711d38300e" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:11:23 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:11:23 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:11:23 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:11:23 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:11:23 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:11:24.870250 5527 utils.go:177] Node "a18bf460-d110-4af6-91a0-4af7a5c1fe76" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:17 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:11:24.870288 5527 utils.go:177] Node "86eff62d-6aee-4907-b3a3-b0af551e243b" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:19 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:19 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:19 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:19 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:11:24 +0000 UTC 2019-09-06 15:11:19 +0000 UTC KubeletReady kubelet is posting ready status}]
I0906 15:11:24.870315 5527 utils.go:177] Node "f94e2b84-7660-436d-b933-ce06e9220145" is ready. Conditions are: [{OutOfDisk False 2019-09-06 15:11:22 +0000 UTC 2019-09-06 15:11:18 +0000 UTC KubeletHasSufficientDisk kubelet has sufficient disk space available} {MemoryPressure False 2019-09-06 15:11:22 +0000 UTC 2019-09-06 15:11:18 +0000 UTC KubeletHasSufficientMemory kubelet has sufficient memory available} {DiskPressure False 2019-09-06 15:11:22 +0000 UTC 2019-09-06 15:11:18 +0000 UTC KubeletHasNoDiskPressure kubelet has no disk pressure} {PIDPressure False 2019-09-06 15:11:22 +0000 UTC 2019-09-06 15:11:18 +0000 UTC KubeletHasSufficientPID kubelet has sufficient PID available} {Ready True 2019-09-06 15:11:22 +0000 UTC 2019-09-06 15:11:18 +0000 UTC KubeletReady kubelet is posting ready status}]
STEP: scaling "e2e-91a2d-w-0" from 2 to 0 replicas
I0906 15:11:24.870364 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: scaling "e2e-91a2d-w-1" from 2 to 0 replicas
I0906 15:11:24.892046 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: waiting for cluster to get back to original size. Final size should be 5 nodes
I0906 15:11:24.924116 5527 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:11:24.989484 5527 utils.go:99] MachineSet "e2e-91a2d-w-0" replicas 0. Ready: 2, available 2
I0906 15:11:24.989519 5527 utils.go:99] MachineSet "e2e-91a2d-w-1" replicas 0. Ready: 2, available 2
I0906 15:11:24.989529 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:11:24.989539 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:11:24.989548 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:11:24.989558 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:11:25.006743 5527 utils.go:231] Node "41e2bf6d-a04c-4354-afea-7e711d38300e". Ready: true. Unschedulable: false
I0906 15:11:25.006770 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:11:25.006779 5527 utils.go:231] Node "86eff62d-6aee-4907-b3a3-b0af551e243b". Ready: true. Unschedulable: false
I0906 15:11:25.006787 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:11:25.006795 5527 utils.go:231] Node "a18bf460-d110-4af6-91a0-4af7a5c1fe76". Ready: true. Unschedulable: false
I0906 15:11:25.006803 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:11:25.006811 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:11:25.006823 5527 utils.go:231] Node "f94e2b84-7660-436d-b933-ce06e9220145". Ready: true. Unschedulable: false
I0906 15:11:25.006831 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:11:25.023990 5527 utils.go:87] Cluster size is 9 nodes
I0906 15:11:30.024230 5527 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0906 15:11:30.029565 5527 utils.go:99] MachineSet "e2e-91a2d-w-0" replicas 0. Ready: 0, available 0
I0906 15:11:30.029588 5527 utils.go:99] MachineSet "e2e-91a2d-w-1" replicas 0. Ready: 0, available 0
I0906 15:11:30.029598 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:11:30.029607 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:11:30.029613 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:11:30.029618 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:11:30.035465 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:11:30.035485 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:11:30.035495 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:11:30.035503 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:11:30.035512 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:11:30.038851 5527 utils.go:87] Cluster size is 5 nodes
I0906 15:11:30.038883 5527 utils.go:257] waiting for all nodes to be ready
I0906 15:11:30.042502 5527 utils.go:262] waiting for all nodes to be schedulable
I0906 15:11:30.049309 5527 utils.go:290] [remaining 1m0s] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab" is schedulable
I0906 15:11:30.049334 5527 utils.go:290] [remaining 1m0s] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546" is schedulable
I0906 15:11:30.049346 5527 utils.go:290] [remaining 1m0s] Node "b3408843-b44c-4857-ab9d-3b13ab158aea" is schedulable
I0906 15:11:30.049357 5527 utils.go:290] [remaining 1m0s] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b" is schedulable
I0906 15:11:30.049367 5527 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:11:30.049376 5527 utils.go:267] waiting for each node to be backed by a machine
I0906 15:11:30.058252 5527 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:11:30.058283 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-6pt7l" is linked to node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:11:30.058303 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-hpgct" is linked to node "8d76d38d-5446-4aef-802c-ad0fcfdb4546"
I0906 15:11:30.058318 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-scthk" is linked to node "b3408843-b44c-4857-ab9d-3b13ab158aea"
I0906 15:11:30.058331 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-s4l9g" is linked to node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b"
I0906 15:11:30.058344 5527 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
• [SLOW TEST:15.344 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
grow and decrease when scaling different machineSets simultaneously
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:267
------------------------------
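The scaling steps in the spec above boil down to writing spec.replicas on the transient MachineSets and letting the controller converge, creating or deleting the backing machines. A sketch using the dynamic client against the machine.openshift.io/v1beta1 API (resource coordinates assumed; the suite uses its own typed client):

```go
package e2e

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/dynamic"
)

// machineSetGVR assumes the machine.openshift.io/v1beta1 API served by the
// machine-api stack under test.
var machineSetGVR = schema.GroupVersionResource{
	Group:    "machine.openshift.io",
	Version:  "v1beta1",
	Resource: "machinesets",
}

// scaleMachineSet merge-patches spec.replicas, the moral equivalent of the
// `scaling "..." from X to Y replicas` steps above.
func scaleMachineSet(dc dynamic.Interface, ns, name string, replicas int64) error {
	patch := []byte(fmt.Sprintf(`{"spec":{"replicas":%d}}`, replicas))
	_, err := dc.Resource(machineSetGVR).Namespace(ns).Patch(
		context.TODO(), name, types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}
```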
[Feature:Machines] Managed cluster should
drain node before removing machine resource
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
I0906 15:11:30.068510 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: checking existing cluster size
I0906 15:11:30.085166 5527 utils.go:87] Cluster size is 5 nodes
STEP: Taking the first worker machineset (assuming only worker machines are backed by machinesets)
STEP: Creating two new machines, one for the node about to be drained, the other for moving the workload off the drained node
STEP: Waiting until both new nodes are ready
E0906 15:11:30.096585 5527 utils.go:342] [remaining 15m0s] Expecting 2 nodes with map[string]string{"node-role.kubernetes.io/worker":"", "node-draining-test":"54ff90f1-d0b8-11e9-978c-0a445740e986"} labels in Ready state, got 0
I0906 15:11:35.100243 5527 utils.go:346] [14m55s remaining] Found the expected number (2) of nodes with map[node-draining-test:54ff90f1-d0b8-11e9-978c-0a445740e986 node-role.kubernetes.io/worker:] labels in Ready state
STEP: Creating RC with workload
STEP: Creating PDB for RC
STEP: Wait until all replicas are ready
I0906 15:11:35.141657 5527 utils.go:396] [15m0s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 0
I0906 15:11:40.145072 5527 utils.go:396] [14m55s remaining] Waiting for at least one RC ready replica, ReadyReplicas: 0, Replicas: 20
I0906 15:11:45.143917 5527 utils.go:399] [14m50s remaining] Waiting for RC ready replicas, ReadyReplicas: 20, Replicas: 20
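The PDB created above is what makes the later drain meaningful: evictions must respect the budget while the workload is moved off the drained node. A hypothetical shape for it; the object name and minAvailable value are guesses, but the app=nginx selector matches the pod dumps that follow:

```go
package e2e

import (
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// newWorkloadPDB builds a PodDisruptionBudget covering the RC's pods so a
// drain can only evict them while minAvailable replicas stay up.
func newWorkloadPDB(ns string, minAvailable int) *policyv1.PodDisruptionBudget {
	ma := intstr.FromInt(minAvailable)
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "pdb-workload", Namespace: ns},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &ma,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "nginx"},
			},
		},
	}
}
```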
I0906 15:11:45.153706 5527 utils.go:416] POD #0/20: {
"metadata": {
"name": "pdb-workload-5wbhf",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-5wbhf",
"uid": "9dce09a4-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3788",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.211.234.220",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://2e0cea5ea8d141c4"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.153865 5527 utils.go:416] POD #1/20: {
"metadata": {
"name": "pdb-workload-747sq",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-747sq",
"uid": "9dcb94ff-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3767",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.78.42.110",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://31ab02dda3e57412"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154031 5527 utils.go:416] POD #2/20: {
"metadata": {
"name": "pdb-workload-bmgt5",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-bmgt5",
"uid": "9dcdb415-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3816",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.202.74.28",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://b137ec1b132a04ce"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154199 5527 utils.go:416] POD #3/20: {
"metadata": {
"name": "pdb-workload-bzxqt",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-bzxqt",
"uid": "9dcd97eb-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3804",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.206.214.232",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://7c95463b3528976d"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154362 5527 utils.go:416] POD #4/20: {
"metadata": {
"name": "pdb-workload-csr24",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-csr24",
"uid": "9dc9102d-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3779",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.183.109.152",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://6659cb427942f37b"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154490 5527 utils.go:416] POD #5/20: {
"metadata": {
"name": "pdb-workload-cwsx6",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-cwsx6",
"uid": "9dcbb286-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3782",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.107.50.185",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://e6662125b7fe70a2"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154610 5527 utils.go:416] POD #6/20: {
"metadata": {
"name": "pdb-workload-d9knp",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-d9knp",
"uid": "9dc9dc0a-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3770",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.239.96.124",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:38Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://733660889ad6ab4e"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154717 5527 utils.go:416] POD #7/20: {
"metadata": {
"name": "pdb-workload-fzkkf",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-fzkkf",
"uid": "9dc9c804-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3841",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:42Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.16.218.162",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:41Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://84965017a9a309e5"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154838 5527 utils.go:416] POD #8/20: {
"metadata": {
"name": "pdb-workload-hkgss",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-hkgss",
"uid": "9dd0f0e1-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3838",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.24.253.172",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://874196afe775dd5e"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.154958 5527 utils.go:416] POD #9/20: {
"metadata": {
"name": "pdb-workload-jq5l8",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-jq5l8",
"uid": "9dcda9db-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3773",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.188.188.167",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://4f58ac0b6009aa49"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155088 5527 utils.go:416] POD #10/20: {
"metadata": {
"name": "pdb-workload-jvzrv",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-jvzrv",
"uid": "9dd1759c-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3827",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.83.195.238",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:41Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://3e1150987debfa17"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155225 5527 utils.go:416] POD #11/20: {
"metadata": {
"name": "pdb-workload-lf6xd",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-lf6xd",
"uid": "9dcb7029-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3811",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.197.89.194",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:38Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://1ed8492024500655"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155374 5527 utils.go:416] POD #12/20: {
"metadata": {
"name": "pdb-workload-qpcqf",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-qpcqf",
"uid": "9dcde4a1-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3820",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.61.205.104",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://1ab2d89934fe92bd"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155505 5527 utils.go:416] POD #13/20: {
"metadata": {
"name": "pdb-workload-rv85j",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-rv85j",
"uid": "9dcbaeef-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3807",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.206.128.173",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:41Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://68d2b560e2b67fc7"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155647 5527 utils.go:416] POD #14/20: {
"metadata": {
"name": "pdb-workload-t968g",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-t968g",
"uid": "9dcdd176-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3800",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.103.218.85",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://b117c42312eec019"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155781 5527 utils.go:416] POD #15/20: {
"metadata": {
"name": "pdb-workload-tn4bp",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-tn4bp",
"uid": "9dce0b68-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3823",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.236.24.225",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://41e63de3e86f5c12"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.155912 5527 utils.go:416] POD #16/20: {
"metadata": {
"name": "pdb-workload-w4kh2",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-w4kh2",
"uid": "9dd1592e-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3795",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.142.223.35",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://d279a033e940fab0"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.156065 5527 utils.go:416] POD #17/20: {
"metadata": {
"name": "pdb-workload-wfsmn",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-wfsmn",
"uid": "9dd14795-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3831",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "310d2184-6584-443c-83cf-1df6982bea38",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.18",
"podIP": "10.220.235.32",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://36b3b7994c4f0845"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.156207 5527 utils.go:416] POD #18/20: {
"metadata": {
"name": "pdb-workload-zs4hj",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-zs4hj",
"uid": "9dcdc610-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3776",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.28.179.7",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:40Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://3d35c91aed42ae00"
}
],
"qosClass": "Burstable"
}
}
I0906 15:11:45.156365 5527 utils.go:416] POD #19/20: {
"metadata": {
"name": "pdb-workload-zvpdf",
"generateName": "pdb-workload-",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/pods/pdb-workload-zvpdf",
"uid": "9dd1a952-d0b8-11e9-b3bc-0a445740e986",
"resourceVersion": "3785",
"creationTimestamp": "2019-09-06T15:11:35Z",
"labels": {
"app": "nginx"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ReplicationController",
"name": "pdb-workload",
"uid": "9dc65a09-d0b8-11e9-b3bc-0a445740e986",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-t266s",
"secret": {
"secretName": "default-token-t266s",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "work",
"image": "busybox",
"command": [
"sleep",
"10h"
],
"resources": {
"requests": {
"cpu": "50m",
"memory": "50Mi"
}
},
"volumeMounts": [
{
"name": "default-token-t266s",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"node-draining-test": "54ff90f1-d0b8-11e9-978c-0a445740e986",
"node-role.kubernetes.io/worker": ""
},
"serviceAccountName": "default",
"serviceAccount": "default",
"nodeName": "927f2a33-8b87-455d-9a89-7c030aa4fcf2",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "kubemark",
"operator": "Exists"
},
{
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
},
{
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"effect": "NoExecute",
"tolerationSeconds": 300
}
],
"priority": 0
},
"status": {
"phase": "Running",
"conditions": [
{
"type": "Initialized",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
},
{
"type": "Ready",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:41Z"
},
{
"type": "ContainersReady",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": null
},
{
"type": "PodScheduled",
"status": "True",
"lastProbeTime": null,
"lastTransitionTime": "2019-09-06T15:11:35Z"
}
],
"hostIP": "172.17.0.23",
"podIP": "10.218.46.158",
"startTime": "2019-09-06T15:11:35Z",
"containerStatuses": [
{
"name": "work",
"state": {
"running": {
"startedAt": "2019-09-06T15:11:39Z"
}
},
"lastState": {},
"ready": true,
"restartCount": 0,
"image": "busybox:latest",
"imageID": "docker://busybox:latest",
"containerID": "docker://8be426a8c14c7d52"
}
],
"qosClass": "Burstable"
}
}
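[editor's note] The twenty pdb-workload pods dumped above all share one template. Below is a minimal sketch of that template in Go, reconstructed from the fields in the dumps using client-go's typed API; the package and function names are illustrative, not the test's own code. Note that requests with no limits are what place every pod in the Burstable QoS class reported in each status.

package podtemplate

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pdbWorkloadPodTemplate rebuilds the template visible in every dump above:
// a busybox container sleeping 10h, 50m CPU / 50Mi memory requests, a node
// selector pinning pods to the test's labeled workers, and a blanket
// toleration for the kubemark taint. The not-ready/unreachable tolerations
// seen in the dumps are injected by the DefaultTolerationSeconds admission
// plugin, so they are not part of the template itself.
func pdbWorkloadPodTemplate(testID string) corev1.PodTemplateSpec {
    return corev1.PodTemplateSpec{
        ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "nginx"}},
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{{
                Name:    "work",
                Image:   "busybox",
                Command: []string{"sleep", "10h"},
                Resources: corev1.ResourceRequirements{
                    // Requests without limits => Burstable QoS, reported
                    // as "qosClass": "Burstable" in every status above.
                    Requests: corev1.ResourceList{
                        corev1.ResourceCPU:    resource.MustParse("50m"),
                        corev1.ResourceMemory: resource.MustParse("50Mi"),
                    },
                },
            }},
            NodeSelector: map[string]string{
                "node-draining-test":             testID,
                "node-role.kubernetes.io/worker": "",
            },
            Tolerations: []corev1.Toleration{{
                Key:      "kubemark",
                Operator: corev1.TolerationOpExists,
            }},
        },
    }
}

With this template behind a 20-replica ReplicationController and the PodDisruptionBudget created earlier in the test, the drain that follows has a workload to evict under constraint.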
STEP: Delete machine to trigger node draining
STEP: Observing and verifying node draining
E0906 15:11:45.165841 5527 utils.go:451] Node "310d2184-6584-443c-83cf-1df6982bea38" is expected to be marked as unschedulable, but it is not
I0906 15:11:50.170823 5527 utils.go:455] [remaining 14m55s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:11:50.177921 5527 utils.go:474] [remaining 14m55s] Have 9 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:11:50.179578 5527 utils.go:490] [remaining 14m55s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:11:50.179598 5527 utils.go:500] [remaining 14m55s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 9
I0906 15:11:55.177703 5527 utils.go:455] [remaining 14m50s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:11:55.191140 5527 utils.go:474] [remaining 14m50s] Have 8 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:11:55.195667 5527 utils.go:490] [remaining 14m50s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:11:55.195696 5527 utils.go:500] [remaining 14m50s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 8
I0906 15:12:00.170299 5527 utils.go:455] [remaining 14m45s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:00.177185 5527 utils.go:474] [remaining 14m45s] Have 7 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:00.178921 5527 utils.go:490] [remaining 14m45s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:00.178945 5527 utils.go:500] [remaining 14m45s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 7
I0906 15:12:05.169999 5527 utils.go:455] [remaining 14m40s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:05.177028 5527 utils.go:474] [remaining 14m40s] Have 6 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:05.179833 5527 utils.go:490] [remaining 14m40s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:05.179861 5527 utils.go:500] [remaining 14m40s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 6
I0906 15:12:10.170902 5527 utils.go:455] [remaining 14m35s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:10.177679 5527 utils.go:474] [remaining 14m35s] Have 5 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:10.179435 5527 utils.go:490] [remaining 14m35s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:10.179487 5527 utils.go:500] [remaining 14m35s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 5
I0906 15:12:15.169974 5527 utils.go:455] [remaining 14m30s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:15.177332 5527 utils.go:474] [remaining 14m30s] Have 4 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:15.178891 5527 utils.go:490] [remaining 14m30s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:15.178918 5527 utils.go:500] [remaining 14m30s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 4
I0906 15:12:20.171174 5527 utils.go:455] [remaining 14m25s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:20.177183 5527 utils.go:474] [remaining 14m25s] Have 3 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:20.178915 5527 utils.go:490] [remaining 14m25s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:20.178944 5527 utils.go:500] [remaining 14m25s] Expecting at most 2 pods to be scheduled to the drained node "310d2184-6584-443c-83cf-1df6982bea38", got 3
I0906 15:12:25.170112 5527 utils.go:455] [remaining 14m20s] Node "310d2184-6584-443c-83cf-1df6982bea38" is marked unschedulable as expected
I0906 15:12:25.176608 5527 utils.go:474] [remaining 14m20s] Have 2 pods scheduled to node "310d2184-6584-443c-83cf-1df6982bea38"
I0906 15:12:25.178235 5527 utils.go:490] [remaining 14m20s] RC ReadyReplicas: 20, Replicas: 20
I0906 15:12:25.178259 5527 utils.go:504] [remaining 14m20s] Expected result: all pods from the RC, up to the last one or two, were scheduled to a different node while respecting the PDB
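[editor's note] The five-second cadence above comes from a poll loop: each tick re-reads the node, asserts it is still cordoned, and counts the workload pods still bound to it. A minimal sketch of such a loop, assuming client-go, the "default" namespace, and the at-most-2 threshold from the log; utils.go's actual implementation is not shown here.

package drainwatch

import (
    "context"
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// waitForDrain mirrors the logged cadence: the node must stay cordoned
// while the number of pods bound to it shrinks, until at most two remain.
func waitForDrain(c kubernetes.Interface, nodeName string) error {
    return wait.PollImmediate(5*time.Second, 15*time.Minute, func() (bool, error) {
        node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
        if err != nil {
            return false, nil // transient API error: keep polling
        }
        if !node.Spec.Unschedulable {
            return false, nil // drain cordons the node before evicting
        }
        pods, err := c.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{
            FieldSelector: fmt.Sprintf("spec.nodeName=%s", nodeName),
        })
        if err != nil {
            return false, nil
        }
        // The PDB caps concurrent disruption, so evictions proceed roughly
        // one per tick (9, 8, 7, ...) while the RC keeps ReadyReplicas at
        // 20 by rescheduling onto other nodes.
        return len(pods.Items) <= 2, nil
    })
}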
STEP: Validating the machine is deleted
E0906 15:12:25.179998 5527 infra.go:454] Machine "machine1" not yet deleted
E0906 15:12:30.182527 5527 infra.go:454] Machine "machine1" not yet deleted
I0906 15:12:35.182231 5527 infra.go:463] Machine "machine1" successfully deleted
STEP: Validate underlying node corresponding to machine1 is removed as well
I0906 15:12:35.183733 5527 utils.go:530] [15m0s remaining] Node "310d2184-6584-443c-83cf-1df6982bea38" successfully deleted
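[editor's note] Validating deletion, as above, reduces to polling until the API server answers NotFound for the Machine. A sketch using the dynamic client; the machine.openshift.io/v1beta1 GVR and the 15-minute budget are assumptions, and the real test's client plumbing may differ.

package machinegone

import (
    "context"
    "time"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/dynamic"
)

// machineGVR is assumed here; adjust the group/version if the cluster's
// installed machine API differs.
var machineGVR = schema.GroupVersionResource{
    Group:    "machine.openshift.io",
    Version:  "v1beta1",
    Resource: "machines",
}

// waitForMachineDeleted keeps polling while the Machine still exists,
// matching the "not yet deleted" / "successfully deleted" pair above.
func waitForMachineDeleted(c dynamic.Interface, namespace, name string) error {
    return wait.PollImmediate(5*time.Second, 15*time.Minute, func() (bool, error) {
        _, err := c.Resource(machineGVR).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return true, nil // gone: deletion and any finalizers completed
        }
        return false, nil // still present, or a transient error: retry
    })
}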
STEP: Delete PDB
STEP: Delete machine2
STEP: Waiting for cluster to get back to its original size. Final size should be 5 nodes
I0906 15:12:35.191084 5527 utils.go:239] [remaining 15m0s] Cluster size expected to be 5 nodes
I0906 15:12:35.197497 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:12:35.197522 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:12:35.197532 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:12:35.197541 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:12:35.201850 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:12:35.201871 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:12:35.201881 5527 utils.go:231] Node "927f2a33-8b87-455d-9a89-7c030aa4fcf2". Ready: true. Unschedulable: true
I0906 15:12:35.201889 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:12:35.201897 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:12:35.201909 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:12:35.205873 5527 utils.go:87] Cluster size is 6 nodes
I0906 15:12:40.206153 5527 utils.go:239] [remaining 14m55s] Cluster size expected to be 5 nodes
I0906 15:12:40.209599 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:12:40.209627 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:12:40.209637 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:12:40.209646 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:12:40.212928 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:12:40.212949 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:12:40.212955 5527 utils.go:231] Node "927f2a33-8b87-455d-9a89-7c030aa4fcf2". Ready: true. Unschedulable: true
I0906 15:12:40.212963 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:12:40.212973 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:12:40.212981 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:12:40.216851 5527 utils.go:87] Cluster size is 6 nodes
I0906 15:12:45.206118 5527 utils.go:239] [remaining 14m50s] Cluster size expected to be 5 nodes
I0906 15:12:45.209024 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:12:45.209051 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:12:45.209061 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:12:45.209070 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:12:45.212171 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:12:45.212193 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:12:45.212203 5527 utils.go:231] Node "927f2a33-8b87-455d-9a89-7c030aa4fcf2". Ready: true. Unschedulable: true
I0906 15:12:45.212212 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:12:45.212220 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:12:45.212228 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:12:45.216040 5527 utils.go:87] Cluster size is 6 nodes
I0906 15:12:50.206134 5527 utils.go:239] [remaining 14m45s] Cluster size expected to be 5 nodes
I0906 15:12:50.209012 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:12:50.209034 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:12:50.209040 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:12:50.209046 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:12:50.211890 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:12:50.211910 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:12:50.211916 5527 utils.go:231] Node "927f2a33-8b87-455d-9a89-7c030aa4fcf2". Ready: true. Unschedulable: true
I0906 15:12:50.211921 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:12:50.211929 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:12:50.211937 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:12:50.218996 5527 utils.go:87] Cluster size is 6 nodes
I0906 15:12:55.206486 5527 utils.go:239] [remaining 14m40s] Cluster size expected to be 5 nodes
I0906 15:12:55.209887 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset" replicas 1. Ready: 1, available 1
I0906 15:12:55.209921 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-blue" replicas 1. Ready: 1, available 1
I0906 15:12:55.209933 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-green" replicas 1. Ready: 1, available 1
I0906 15:12:55.209944 5527 utils.go:99] MachineSet "kubemark-actuator-testing-machineset-red" replicas 1. Ready: 1, available 1
I0906 15:12:55.212888 5527 utils.go:231] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab". Ready: true. Unschedulable: false
I0906 15:12:55.212917 5527 utils.go:231] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546". Ready: true. Unschedulable: false
I0906 15:12:55.212928 5527 utils.go:231] Node "b3408843-b44c-4857-ab9d-3b13ab158aea". Ready: true. Unschedulable: false
I0906 15:12:55.212937 5527 utils.go:231] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b". Ready: true. Unschedulable: false
I0906 15:12:55.212947 5527 utils.go:231] Node "minikube". Ready: true. Unschedulable: false
I0906 15:12:55.216206 5527 utils.go:87] Cluster size is 5 nodes
I0906 15:12:55.216237 5527 utils.go:257] waiting for all nodes to be ready
I0906 15:12:55.220294 5527 utils.go:262] waiting for all nodes to be schedulable
I0906 15:12:55.223696 5527 utils.go:290] [remaining 1m0s] Node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab" is schedulable
I0906 15:12:55.223727 5527 utils.go:290] [remaining 1m0s] Node "8d76d38d-5446-4aef-802c-ad0fcfdb4546" is schedulable
I0906 15:12:55.223740 5527 utils.go:290] [remaining 1m0s] Node "b3408843-b44c-4857-ab9d-3b13ab158aea" is schedulable
I0906 15:12:55.223750 5527 utils.go:290] [remaining 1m0s] Node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b" is schedulable
I0906 15:12:55.223761 5527 utils.go:290] [remaining 1m0s] Node "minikube" is schedulable
I0906 15:12:55.223770 5527 utils.go:267] waiting for each node to be backed by a machine
I0906 15:12:55.232480 5527 utils.go:47] [remaining 3m0s] Expecting the same number of machines and nodes, have 5 nodes and 5 machines
I0906 15:12:55.232512 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-6pt7l" is linked to node "6ed3bc5e-d85d-4e5c-bce4-61d11ef633ab"
I0906 15:12:55.232527 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-blue-hpgct" is linked to node "8d76d38d-5446-4aef-802c-ad0fcfdb4546"
I0906 15:12:55.232541 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-green-scthk" is linked to node "b3408843-b44c-4857-ab9d-3b13ab158aea"
I0906 15:12:55.232555 5527 utils.go:70] [remaining 3m0s] Machine "kubemark-actuator-testing-machineset-red-s4l9g" is linked to node "c81bafaa-7edf-4fb4-b5c9-b78f1548066b"
I0906 15:12:55.232569 5527 utils.go:70] [remaining 3m0s] Machine "minikube-static-machine" is linked to node "minikube"
I0906 15:12:55.242816 5527 utils.go:378] [15m0s remaining] Found 0 nodes with the map[node-role.kubernetes.io/worker: node-draining-test:54ff90f1-d0b8-11e9-978c-0a445740e986] label, as expected
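[editor's note] The machine-to-node pairing printed above ("Machine ... is linked to node ...") can be read off each Machine's status.nodeRef. A sketch using unstructured access, since the typed machine client is not shown in this log:

package linkage

import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// nodeRefName extracts status.nodeRef.name from a Machine object; a false
// second return means the machine has not been linked to a node yet.
func nodeRefName(machine *unstructured.Unstructured) (string, bool) {
    name, found, err := unstructured.NestedString(machine.Object, "status", "nodeRef", "name")
    if err != nil || !found || name == "" {
        return "", false
    }
    return name, true
}

Counting machines whose nodeRef resolves to an existing node yields the "5 nodes and 5 machines" parity check logged above.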
• [SLOW TEST:85.174 seconds]
[Feature:Machines] Managed cluster should
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:126
drain node before removing machine resource
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:346
------------------------------
[Feature:Machines] Managed cluster should
reject invalid machinesets
/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra/infra.go:487
I0906 15:12:55.242925 5527 framework.go:406] >>> kubeConfig: /root/.kube/config
STEP: Creating invalid machineset
STEP: Waiting for ReconcileError MachineSet event
I0906 15:12:55.327608 5527 infra.go:506] Fetching ReconcileError MachineSet invalid-machineset event
I0906 15:12:55.327648 5527 infra.go:512] Found ReconcileError event for "invalid-machineset" machine set with the following message: "invalid-machineset" machineset validation failed: spec.template.metadata.labels: Invalid value: map[string]string{"big-kitty":"i-am-bit-kitty"}: `selector` does not match template `labels`
STEP: Verify no machines from the "invalid-machineset" machineset were created
I0906 15:12:55.330968 5527 infra.go:528] Have 0 machines generated from "invalid-machineset" machineset
STEP: Deleting invalid machineset
•
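[editor's note] The ReconcileError above reflects the standard MachineSet invariant: the selector must be able to select the set's own template labels. A standalone sketch of that check using only apimachinery; the controller's real validation lives elsewhere and differs in detail.

package selectorcheck

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
)

// validateSelector fails when the machineset's selector cannot select its
// own template labels, the mismatch reported in the event above.
func validateSelector(sel *metav1.LabelSelector, templateLabels map[string]string) error {
    selector, err := metav1.LabelSelectorAsSelector(sel)
    if err != nil {
        return err
    }
    if !selector.Matches(labels.Set(templateLabels)) {
        return fmt.Errorf("spec.template.metadata.labels: Invalid value: %v: `selector` does not match template `labels`", templateLabels)
    }
    return nil
}

Any selector that cannot match the template labels map[string]string{"big-kitty": "i-am-bit-kitty"} reproduces the shape of the message quoted in the event.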
Ran 7 of 16 Specs in 202.323 seconds
SUCCESS! -- 7 Passed | 0 Failed | 0 Pending | 9 Skipped
--- PASS: TestE2E (202.32s)
PASS
ok github.com/openshift/cluster-api-actuator-pkg/pkg/e2e 202.381s
make[1]: Leaving directory `/tmp/tmp.3XEIfW31vl/src/github.com/openshift/cluster-api-actuator-pkg'
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN E2E TESTS [00h 04m 28s] ##########
[PostBuildScript] - Executing post build scripts.
[workspace] $ /bin/bash /tmp/jenkins6506429167453858818.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/gathered
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/gathered
/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/gathered
0 directories, 0 files
+ exit 0
[workspace] $ /bin/bash /tmp/jenkins5975698528833924011.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/generated
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
WARNING: You're not using the default seccomp profile
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/generated
/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
└── pid1.journal
0 directories, 8 files
+ exit 0
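Each ssh invocation in the stage above feeds one of the eight files listed in the tree, but the redirections live inside the heredoc-generated script (the bare '+ cat' line), so the following is a minimal sketch of the assumed capture pattern. SSH_CONFIG and ARTIFACT_DIR are shorthand for the literal paths traced above, and the '|| true' matches the stray '+ true' lines that follow commands such as ausearch, which may legitimately exit nonzero:

    SSH_CONFIG="$WORKSPACE/.config/origin-ci-tool/inventory/.ssh_config"  # assumed shorthand
    ARTIFACT_DIR="$WORKSPACE/artifacts/generated"
    # capture AVC denials; tolerate a nonzero exit when ausearch finds no records
    ssh -F "$SSH_CONFIG" openshiftdevel \
        'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1' \
        > "$ARTIFACT_DIR/avc_denials.log" || true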
[workspace] $ /bin/bash /tmp/jenkins7054677787600306698.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/journals
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/journals
/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service
0 directories, 3 files
+ exit 0
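The journal fetches above differ only in the unit name, each landing in a file named after the unit (per the tree). A loop such as the following would be an equivalent sketch; the loop itself and the variable names are assumptions, since the real script body is again hidden behind '+ cat':

    ARTIFACT_DIR="$WORKSPACE/artifacts/journals"  # assumed shorthand
    for unit in docker.service dnsmasq.service systemd-journald.service; do
        ssh -F "$SSH_CONFIG" openshiftdevel \
            sudo journalctl --unit "$unit" --no-pager --all --lines=all \
            > "$ARTIFACT_DIR/$unit"
    done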
[workspace] $ /bin/bash /tmp/jenkins2021010689578215985.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/pull-ci-openshift-machine-api-operator-master-e2e/716/api/json'\''))['\''result'\'']'
+ result=SUCCESS
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/builds/716/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r 'artifacts/gathered/*' gcs/artifacts/
cp: cannot stat ‘artifacts/gathered/*’: No such file or directory
++ export status=FAILURE
++ status=FAILURE
+ exit 0
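The 'cannot stat' failure above is a shell quoting bug: the single quotes around artifacts/gathered/* stop the shell from expanding the glob, so cp receives the literal pattern as a filename. Because the stage runs under trap 'exit 0' EXIT, the step still exits 0 and only the status=FAILURE marker records the problem. Note also that artifacts/gathered is empty in this run (the earlier tree reports 0 files), so even an unquoted glob would fail to match; copying the directory's contents via a trailing /. sidesteps both issues. A sketch of the fix, using the paths from the trace:

    # unquoted glob: works whenever gathered/ is non-empty
    cp -r artifacts/gathered/* gcs/artifacts/
    # robust even for an empty directory: copy the directory's contents themselves
    cp -r artifacts/gathered/. gcs/artifacts/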
[workspace] $ /bin/bash /tmp/jenkins5167898857619189381.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ mktemp
+ script=/tmp/tmp.O7bm8Z9vJ4
+ cat
+ chmod +x /tmp/tmp.O7bm8Z9vJ4
+ scp -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.O7bm8Z9vJ4 openshiftdevel:/tmp/tmp.O7bm8Z9vJ4
+ ssh -F /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.O7bm8Z9vJ4"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"1169987167749935104","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}} ]]
++ jq --compact-output '.buildid |= "716"'
+ JOB_SPEC='{"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"716","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}'
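The jq step above rewrites the Prow-generated snowflake build ID to the Jenkins build number using the update-assignment operator |=. A minimal standalone illustration with abbreviated JSON but the same filter:

    JOB_SPEC='{"type":"presubmit","buildid":"1169987167749935104"}'
    echo "$JOB_SPEC" | jq --compact-output '.buildid |= "716"'
    # prints: {"type":"presubmit","buildid":"716"}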
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"pull-ci-openshift-machine-api-operator-master-e2e","buildid":"716","prowjobid":"2695ec19-d0b6-11e9-a06a-0a58ac108d5e","refs":{"org":"openshift","repo":"machine-api-operator","repo_link":"https://github.com/openshift/machine-api-operator","base_ref":"master","base_sha":"474e14e4965a8c5e6788417c851ccc7fad1acb3a","base_link":"https://github.com/openshift/machine-api-operator/commit/474e14e4965a8c5e6788417c851ccc7fad1acb3a","pulls":[{"number":389,"author":"sadasu","sha":"229c7ea627e98ef3b7c1927a25352d366fea7023","link":"https://github.com/openshift/machine-api-operator/pull/389","commit_link":"https://github.com/openshift/machine-api-operator/pull/389/commits/229c7ea627e98ef3b7c1927a25352d366fea7023","author_link":"https://github.com/sadasu"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin '/data/gcs/*'
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ...
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
a073c86ecf9e: Already exists
cc3fc741b1a9: Already exists
822bed51ba40: Pulling fs layer
85cea451eec0: Pulling fs layer
85cea451eec0: Verifying Checksum
85cea451eec0: Download complete
822bed51ba40: Download complete
822bed51ba40: Pull complete
85cea451eec0: Pull complete
Digest: sha256:03aad50d7ec631ee07c12ac2ba679bd48c7781f7d5754f9e0dcc4e7260e35208
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","file":"prow/gcsupload/run.go:107","func":"k8s.io/test-infra/prow/gcsupload.Options.assembleTargets","level":"warning","msg":"Encountered error in resolving items to upload for /data/gcs/*: stat /data/gcs/*: no such file or directory","time":"2019-09-06T15:13:16Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/716.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:13:16Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:13:16Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:64","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload","level":"info","msg":"Queued for upload","time":"2019-09-06T15:13:16Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/716.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:13:17Z"}
{"component":"gcsupload","dest":"pr-logs/pull/openshift_machine-api-operator/389/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:13:17Z"}
{"component":"gcsupload","dest":"pr-logs/directory/pull-ci-openshift-machine-api-operator-master-e2e/latest-build.txt","file":"prow/pod-utils/gcs/upload.go:70","func":"k8s.io/test-infra/prow/pod-utils/gcs.upload.func1","level":"info","msg":"Finished upload","time":"2019-09-06T15:13:17Z"}
{"component":"gcsupload","file":"prow/gcsupload/run.go:65","func":"k8s.io/test-infra/prow/gcsupload.Options.Run","level":"info","msg":"Finished upload to GCS","time":"2019-09-06T15:13:17Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
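Despite the SUCCESS banner, the gcsupload warning above ('stat /data/gcs/*: no such file or directory') shows that nothing was staged under /data/gcs on the host running the container, so only the build log and the latest-build.txt markers were uploaded. A defensive pre-check in the pushed script would surface this at a glance; the check below is hypothetical and not part of the original script:

    # log loudly when the glob passed to gcsupload matches nothing
    if ! compgen -G '/data/gcs/*' > /dev/null; then
        echo 'WARNING: nothing staged under /data/gcs; only metadata will be uploaded' >&2
    fi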
[workspace] $ /bin/bash /tmp/jenkins2649458443145145662.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate ]]
+ source /var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed
++ export PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config
+ oct deprovision
PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml
PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****
TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir) => {
"changed": false,
"generated_timestamp": "2019-09-06 11:13:18.727101",
"item": "origin_ci_inventory_dir",
"skip_reason": "Conditional check failed",
"skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region) => {
"changed": false,
"generated_timestamp": "2019-09-06 11:13:18.729657",
"item": "origin_ci_aws_region",
"skip_reason": "Conditional check failed",
"skipped": true
}
PLAY [deprovision virtual hosts in EC2] ****************************************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost
TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
"changed": false,
"generated_timestamp": "2019-09-06 11:13:19.545389",
"msg": ""
}
TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 11:13:20.212821",
"msg": "Tags {'Name': 'oct-terminate'} created for resource i-06550787d42cc325e."
}
TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 11:13:21.285786",
"instance_ids": [
"i-06550787d42cc325e"
],
"instances": [
{
"ami_launch_index": "0",
"architecture": "x86_64",
"block_device_mapping": {
"/dev/sda1": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-03d9d644224906960"
},
"/dev/sdb": {
"delete_on_termination": true,
"status": "attached",
"volume_id": "vol-0bbfd421d51201f8f"
}
},
"dns_name": "ec2-52-200-5-193.compute-1.amazonaws.com",
"ebs_optimized": false,
"groups": {
"sg-7e73221a": "default"
},
"hypervisor": "xen",
"id": "i-06550787d42cc325e",
"image_id": "ami-0b77b87a37c3e662c",
"instance_type": "m4.xlarge",
"kernel": null,
"key_name": "libra",
"launch_time": "2019-09-06T14:54:32.000Z",
"placement": "us-east-1c",
"private_dns_name": "ip-172-18-28-208.ec2.internal",
"private_ip": "172.18.28.208",
"public_dns_name": "ec2-52-200-5-193.compute-1.amazonaws.com",
"public_ip": "52.200.5.193",
"ramdisk": null,
"region": "us-east-1",
"root_device_name": "/dev/sda1",
"root_device_type": "ebs",
"state": "running",
"state_code": 16,
"tags": {
"Name": "oct-terminate",
"openshift_etcd": "",
"openshift_master": "",
"openshift_node": ""
},
"tenancy": "default",
"virtualization_type": "hvm"
}
],
"tagged_instances": []
}
TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 11:13:21.523102",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.28.208.yml",
"state": "absent"
}
PLAY [deprovision virtual hosts locally managed by Vagrant] *********************
TASK [Gathering Facts] *********************************************************
ok: [localhost]
PLAY [clean up local configuration for deprovisioned instances] ****************
TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/2b40f3e11aadb569dc9c0c9fb90e7273658ce6ed/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
"changed": true,
"generated_timestamp": "2019-09-06 11:13:22.014626",
"path": "/var/lib/jenkins/jobs/pull-ci-openshift-machine-api-operator-master-e2e/workspace/.config/origin-ci-tool/inventory",
"state": "absent"
}
PLAY RECAP *********************************************************************
localhost : ok=8 changed=4 unreachable=0 failed=0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 05s] ##########
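The deprovision play first renames the instance so a separate termination reaper can catch stragglers, then tears it down; the "state": "running" and "tagged_instances": [] fields in the module output simply reflect the instance's state at the moment the terminate call returned. Roughly the same two steps expressed with the AWS CLI (a sketch for orientation; the playbook actually uses Ansible's ec2 modules):

    # tag the instance for the termination reaper, then tear it down
    aws ec2 create-tags --resources i-06550787d42cc325e --tags Key=Name,Value=oct-terminate
    aws ec2 terminate-instances --instance-ids i-06550787d42cc325e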
Archiving artifacts
Recording test results
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Deferred wipeout is used...
[WS-CLEANUP] done
Finished: SUCCESS