Console Output

Started by user Steve Kuznetsov
[EnvInject] - Loading node environment variables.
Building in workspace /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace
[WS-CLEANUP] Deleting project workspace...
[WS-CLEANUP] Done
[workspace] $ /bin/bash /tmp/hudson6682847206754431464.sh
########## STARTING STAGE: INSTALL THE ORIGIN-CI-TOOL ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
++ readlink /var/lib/jenkins/origin-ci-tool/latest
+ latest=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
+ touch /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
+ cp /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin/activate /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
+ cat
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
+ mkdir -p /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
+ rm -rf /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool
+ oct configure ansible-client verbosity 2
Option verbosity updated to be 2.
+ oct configure aws-client keypair_name libra
Option keypair_name updated to be libra.
+ oct configure aws-client private_key_path /var/lib/jenkins/.ssh/devenv.pem
Option private_key_path updated to be /var/lib/jenkins/.ssh/devenv.pem.
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL THE ORIGIN-CI-TOOL [00h 00m 01s] ##########
[workspace] $ /bin/bash /tmp/hudson5396930418452626345.sh
########## STARTING STAGE: PROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
+ oct provision remote all-in-one --os rhel --stage build --provider aws --discrete-ssh-config --name test_branch_origin_aggregated_logging_prior_3

PLAYBOOK: aws-up.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml

PLAY [ensure we have the parameters necessary to bring up the AWS EC2 instance] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.448343", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_keypair_name)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.451953", 
    "item": "origin_ci_aws_keypair_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_private_key_path)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.456854", 
    "item": "origin_ci_aws_private_key_path", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.461396", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_ami_os)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.465896", 
    "item": "origin_ci_aws_ami_os", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_ami_stage)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.469181", 
    "item": "origin_ci_aws_ami_stage", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_instance_name)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.473983", 
    "item": "origin_ci_aws_instance_name", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_instance_type)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.478487", 
    "item": "origin_ci_aws_master_instance_type", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_identifying_tag_key)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.481726", 
    "item": "origin_ci_aws_identifying_tag_key", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_hostname)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.486759", 
    "item": "origin_ci_aws_hostname", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_ssh_config_strategy)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.489835", 
    "item": "origin_ci_ssh_config_strategy", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_schedulable)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.494240", 
    "item": "openshift_schedulable", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=openshift_node_labels)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.501682", 
    "item": "openshift_node_labels", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/aws-up.yml:28
skipping: [localhost] => (item=origin_ci_aws_master_subnet)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.563760", 
    "item": "origin_ci_aws_master_subnet", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_etcd_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.568466", 
    "item": "origin_ci_aws_etcd_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_node_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.574183", 
    "item": "origin_ci_aws_node_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.580260", 
    "item": "origin_ci_aws_master_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_external_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.585952", 
    "item": "origin_ci_aws_master_external_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_master_internal_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.591619", 
    "item": "origin_ci_aws_master_internal_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.597247", 
    "item": "origin_ci_aws_router_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_router_elb_security_group)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:55.604690", 
    "item": "origin_ci_aws_router_elb_security_group", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [provision an AWS EC2 instance] *******************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [inventory : initialize the inventory directory] **************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:56.486236", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the nested group mapping] ********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:7
changed: [localhost] => {
    "changed": true, 
    "checksum": "18aaee00994df38cc3a63b635893175235331a9c", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/nested_group_mappings", 
    "generated_timestamp": "2017-06-09 12:02:57.011101", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "b30c3226ea63efa3ff9c5e346c14a16e", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 93, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024176.77-220516603294477/source", 
    "state": "file", 
    "uid": 997
}

TASK [inventory : initialize the OSEv3 group variables directory] **************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:12
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 12:02:57.211489", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : initialize the host variables directory] *********************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:17
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 12:02:57.404009", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/host_vars", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [inventory : add the default Origin installation configuration] ***********
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/inventory/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "checksum": "4c06ba508f055c20f13426e8587342e8765a7b66", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/group_vars/OSEv3/general.yml", 
    "generated_timestamp": "2017-06-09 12:02:57.746640", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "8aec71c75f7d512b278ae7c6f2959b12", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 331, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024177.59-97582585517542/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine if we are inside AWS EC2] *****************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:2
changed: [localhost] => {
    "changed": true, 
    "cmd": [
        "curl", 
        "-s", 
        "http://instance-data.ec2.internal"
    ], 
    "delta": "0:00:00.015303", 
    "end": "2017-06-09 12:02:58.019019", 
    "failed": false, 
    "failed_when_result": false, 
    "generated_timestamp": "2017-06-09 12:02:58.038070", 
    "rc": 0, 
    "start": "2017-06-09 12:02:58.003716", 
    "stderr": [], 
    "stdout": [
        "1.0", 
        "2007-01-19", 
        "2007-03-01", 
        "2007-08-29", 
        "2007-10-10", 
        "2007-12-15", 
        "2008-02-01", 
        "2008-09-01", 
        "2009-04-04", 
        "2011-01-01", 
        "2011-05-01", 
        "2012-01-12", 
        "2014-02-25", 
        "2014-11-05", 
        "2015-10-20", 
        "2016-04-19", 
        "2016-06-30", 
        "2016-09-02", 
        "latest"
    ], 
    "warnings": [
        "Consider using get_url or uri module rather than running curl"
    ]
}
 [WARNING]: Consider using get_url or uri module rather than running curl

TASK [aws-up : configure EC2 parameters for inventory when controlling from inside EC2] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:7
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_destination_variable": "private_dns_name", 
        "origin_ci_aws_host_address_variable": "private_ip", 
        "origin_ci_aws_vpc_destination_variable": "private_ip_address"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:58.110521"
}

TASK [aws-up : determine where to put the AWS API cache] ***********************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:14
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_cache_dir": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ec2_cache"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:02:58.187691"
}

TASK [aws-up : ensure we have a place to put the AWS API cache] ****************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:18
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 12:02:58.430379", 
    "gid": 995, 
    "group": "jenkins", 
    "mode": "0755", 
    "owner": "jenkins", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ec2_cache", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory script] *************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:23
changed: [localhost] => {
    "changed": true, 
    "checksum": "625b8af723189db3b96ba0026d0f997a0025bc47", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/ec2.py", 
    "generated_timestamp": "2017-06-09 12:02:58.794046", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "cac06c14065dac74904232b89d4ba24c", 
    "mode": "0755", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 63725, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024178.65-95457702633589/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 dynamic inventory configuration] ******************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:29
changed: [localhost] => {
    "changed": true, 
    "checksum": "168bc8df67664b1e1be82567ca152c1a114e356a", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/ec2.ini", 
    "generated_timestamp": "2017-06-09 12:02:59.155242", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "d63d2440661e1082c344dba6ea3beda8", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 407, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024178.87-101828330602015/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : place the EC2 tag to group mappings] ****************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:34
changed: [localhost] => {
    "changed": true, 
    "checksum": "b4205a33dc73f62bd4f77f35d045cf8e09ae62b0", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/tag_to_group_mappings", 
    "generated_timestamp": "2017-06-09 12:02:59.479678", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "bc3a567a1b6f342e1005182efc1b66be", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 287, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024179.35-73931872445504/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:39
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:04.495872", 
    "results": [
        {
            "ami_id": "ami-a3adf2b5", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-e88694f4", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-2bcc80c9", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2017-06-09T02:39:07.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the build stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_build_382", 
            "name": "ami_build_origin_int_rhel_build_382", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_build_382", 
                "image_stage": "build", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }, 
        {
            "ami_id": "ami-6c6d337a", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-0e151e77", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-67d9ede3", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2017-06-09T13:50:40.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the build stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_build_384", 
            "name": "ami_build_origin_int_rhel_build_384", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_build_384", 
                "image_stage": "build", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }, 
        {
            "ami_id": "ami-2494ca32", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-d54de324", 
                    "volume_type": "gp2"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "encrypted": false, 
                    "size": 35, 
                    "snapshot_id": "snap-6b72bd6e", 
                    "volume_type": "gp2"
                }
            }, 
            "creationDate": "2017-06-09T15:41:54.000Z", 
            "description": "OpenShift Origin development AMI on rhel at the build stage.", 
            "hypervisor": "xen", 
            "is_public": false, 
            "location": "531415883065/ami_build_origin_int_rhel_build_385", 
            "name": "ami_build_origin_int_rhel_build_385", 
            "owner_id": "531415883065", 
            "platform": null, 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "available", 
            "tags": {
                "Name": "ami_build_origin_int_rhel_build_385", 
                "image_stage": "build", 
                "operating_system": "rhel", 
                "ready": "yes"
            }, 
            "virtualization_type": "hvm"
        }
    ]
}

TASK [aws-up : determine which AMI to use] *************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:51
ok: [localhost] => (item={u'ami_id': u'ami-a3adf2b5', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the build stage.', u'tags': {u'ready': u'yes', u'operating_system': u'rhel', u'image_stage': u'build', u'Name': u'ami_build_origin_int_rhel_build_382'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-2bcc80c9', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-e88694f4', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_build_382', u'is_public': False, u'creationDate': u'2017-06-09T02:39:07.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_build_382'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id": "ami-a3adf2b5"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:04.584162", 
    "item": {
        "ami_id": "ami-a3adf2b5", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-e88694f4", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-2bcc80c9", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2017-06-09T02:39:07.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the build stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_build_382", 
        "name": "ami_build_origin_int_rhel_build_382", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_build_382", 
            "image_stage": "build", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}
ok: [localhost] => (item={u'ami_id': u'ami-6c6d337a', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the build stage.', u'tags': {u'ready': u'yes', u'operating_system': u'rhel', u'image_stage': u'build', u'Name': u'ami_build_origin_int_rhel_build_384'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-67d9ede3', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-0e151e77', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_build_384', u'is_public': False, u'creationDate': u'2017-06-09T13:50:40.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_build_384'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id": "ami-6c6d337a"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:04.587851", 
    "item": {
        "ami_id": "ami-6c6d337a", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-0e151e77", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-67d9ede3", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2017-06-09T13:50:40.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the build stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_build_384", 
        "name": "ami_build_origin_int_rhel_build_384", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_build_384", 
            "image_stage": "build", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}
ok: [localhost] => (item={u'ami_id': u'ami-2494ca32', u'root_device_type': u'ebs', u'description': u'OpenShift Origin development AMI on rhel at the build stage.', u'tags': {u'ready': u'yes', u'operating_system': u'rhel', u'image_stage': u'build', u'Name': u'ami_build_origin_int_rhel_build_385'}, u'hypervisor': u'xen', u'block_device_mapping': {u'/dev/sdb': {u'encrypted': False, u'snapshot_id': u'snap-6b72bd6e', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}, u'/dev/sda1': {u'encrypted': False, u'snapshot_id': u'snap-d54de324', u'delete_on_termination': True, u'volume_type': u'gp2', u'size': 35}}, u'architecture': u'x86_64', u'owner_id': u'531415883065', u'platform': None, u'state': u'available', u'location': u'531415883065/ami_build_origin_int_rhel_build_385', u'is_public': False, u'creationDate': u'2017-06-09T15:41:54.000Z', u'root_device_name': u'/dev/sda1', u'virtualization_type': u'hvm', u'name': u'ami_build_origin_int_rhel_build_385'}) => {
    "ansible_facts": {
        "origin_ci_aws_ami_id": "ami-2494ca32"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:04.594907", 
    "item": {
        "ami_id": "ami-2494ca32", 
        "architecture": "x86_64", 
        "block_device_mapping": {
            "/dev/sda1": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-d54de324", 
                "volume_type": "gp2"
            }, 
            "/dev/sdb": {
                "delete_on_termination": true, 
                "encrypted": false, 
                "size": 35, 
                "snapshot_id": "snap-6b72bd6e", 
                "volume_type": "gp2"
            }
        }, 
        "creationDate": "2017-06-09T15:41:54.000Z", 
        "description": "OpenShift Origin development AMI on rhel at the build stage.", 
        "hypervisor": "xen", 
        "is_public": false, 
        "location": "531415883065/ami_build_origin_int_rhel_build_385", 
        "name": "ami_build_origin_int_rhel_build_385", 
        "owner_id": "531415883065", 
        "platform": null, 
        "root_device_name": "/dev/sda1", 
        "root_device_type": "ebs", 
        "state": "available", 
        "tags": {
            "Name": "ami_build_origin_int_rhel_build_385", 
            "image_stage": "build", 
            "operating_system": "rhel", 
            "ready": "yes"
        }, 
        "virtualization_type": "hvm"
    }
}

TASK [aws-up : determine which subnets are available] **************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:57
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:05.183418", 
    "subnets": [
        {
            "availability_zone": "us-east-1d", 
            "available_ip_address_count": 3889, 
            "cidr_block": "172.18.0.0/20", 
            "default_for_az": "false", 
            "id": "subnet-cf57c596", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-1", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }, 
        {
            "availability_zone": "us-east-1c", 
            "available_ip_address_count": 4086, 
            "cidr_block": "172.18.16.0/20", 
            "default_for_az": "false", 
            "id": "subnet-8bdb5ac2", 
            "map_public_ip_on_launch": "true", 
            "state": "available", 
            "tags": {
                "Name": "devenv-subnet-2", 
                "origin_ci_aws_cluster_component": "master_subnet"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which subnets to use for the master] ******************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:64
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_subnet_ids": [
            "subnet-cf57c596", 
            "subnet-8bdb5ac2"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:05.251691"
}

TASK [aws-up : determine which security groups are available] ******************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:69
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:05.924228", 
    "security_groups": [
        {
            "description": "default VPC security group", 
            "group_id": "sg-7e73221a", 
            "group_name": "default", 
            "ip_permissions": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": [
                        {
                            "group_id": "sg-7e73221a", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-d0b6baad", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-38b1bd45", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-0fb30570", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-29ab1d56", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-390c0246", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-61050b1e", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-9f1b15e0", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-4f121c30", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-3e2b2541", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-6d2b2512", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-cb0711b4", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-a4091fdb", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-bc2036c3", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-258d925a", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-bd1cc3c3", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-5333ec2d", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-282ff156", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-95c77deb", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-06ba0078", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-5ba32325", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-4b088835", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-3fdb5141", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-e6ddac98", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-d86b1ba6", 
                            "user_id": "531415883065"
                        }, 
                        {
                            "group_id": "sg-3f4d3d41", 
                            "user_id": "531415883065"
                        }
                    ]
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "174.7.77.40/32"
                        }, 
                        {
                            "cidr_ip": "119.254.120.64/26"
                        }, 
                        {
                            "cidr_ip": "119.254.196.64/26"
                        }, 
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "209.132.186.34/32"
                        }, 
                        {
                            "cidr_ip": "213.175.37.10/32"
                        }, 
                        {
                            "cidr_ip": "62.40.79.66/32"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }, 
                        {
                            "cidr_ip": "38.140.108.0/24"
                        }, 
                        {
                            "cidr_ip": "213.175.37.9/32"
                        }, 
                        {
                            "cidr_ip": "38.99.12.232/29"
                        }, 
                        {
                            "cidr_ip": "108.49.212.36/32"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 8444, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 22, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 22, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 80, 
                    "ip_protocol": "tcp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "54.241.19.245/32"
                        }, 
                        {
                            "cidr_ip": "97.65.119.184/29"
                        }, 
                        {
                            "cidr_ip": "107.20.219.35/32"
                        }, 
                        {
                            "cidr_ip": "108.166.48.153/32"
                        }, 
                        {
                            "cidr_ip": "212.199.177.64/27"
                        }, 
                        {
                            "cidr_ip": "212.72.208.162/32"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 443, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": 53, 
                    "ip_protocol": "udp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "209.132.176.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.224.0/20"
                        }, 
                        {
                            "cidr_ip": "66.187.239.0/24"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": 53, 
                    "user_id_group_pairs": []
                }, 
                {
                    "from_port": -1, 
                    "ip_protocol": "icmp", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "to_port": -1, 
                    "user_id_group_pairs": []
                }
            ], 
            "ip_permissions_egress": [
                {
                    "ip_protocol": "-1", 
                    "ip_ranges": [
                        {
                            "cidr_ip": "0.0.0.0/0"
                        }
                    ], 
                    "ipv6_ranges": [], 
                    "prefix_list_ids": [], 
                    "user_id_group_pairs": []
                }
            ], 
            "owner_id": "531415883065", 
            "tags": {
                "Name": "devenv-vpc", 
                "origin_ci_aws_cluster_component": "master_security_group"
            }, 
            "vpc_id": "vpc-69705d0c"
        }
    ]
}

TASK [aws-up : determine which security group to use] **************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:76
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_master_security_group_ids": [
            "sg-7e73221a"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:06.022305"
}

TASK [aws-up : provision an AWS EC2 instance] **********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:81
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 12:03:28.518504", 
    "instance_ids": [
        "i-0c2f17a3c842910cc"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0c35e37cab2fd5b4f"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-05555f2f10ac24a54"
                }
            }, 
            "dns_name": "ec2-34-207-254-240.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-0c2f17a3c842910cc", 
            "image_id": "ami-2494ca32", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2017-06-09T16:03:07.000Z", 
            "placement": "us-east-1d", 
            "private_dns_name": "ip-172-18-7-3.ec2.internal", 
            "private_ip": "172.18.7.3", 
            "public_dns_name": "ec2-34-207-254-240.compute-1.amazonaws.com", 
            "public_ip": "34.207.254.240", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "test_branch_origin_aggregated_logging_prior_3", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [aws-up : determine the host address] *************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:107
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_host": "172.18.7.3"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:28.594302"
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:111
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:28.648587", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : determine the default user to use for SSH] **********************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:116
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_aws_ssh_user": "origin"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:28.708904"
}

TASK [aws-up : update variables for the host] **********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:121
changed: [localhost] => {
    "changed": true, 
    "checksum": "7b4d77345e659b675be7a56fdef8204dda8f2097", 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.7.3.yml", 
    "generated_timestamp": "2017-06-09 12:03:29.029488", 
    "gid": 995, 
    "group": "jenkins", 
    "md5sum": "2a4e5a41f4bad133b00c1072504ad75e", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 682, 
    "src": "/var/lib/jenkins/.ansible/tmp/ansible-tmp-1497024208.89-154400461431969/source", 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:138
ok: [localhost] => {
    "ansible_facts": {
        "origin_ci_ssh_config_files": [
            "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config"
        ]
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:29.092997"
}

TASK [aws-up : determine where updated SSH configuration should go] ************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:143
skipping: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:03:29.149826", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [aws-up : ensure the targeted SSH configuration file exists] **************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:148
changed: [localhost] => (item=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "dest": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "generated_timestamp": "2017-06-09 12:03:29.340519", 
    "gid": 995, 
    "group": "jenkins", 
    "item": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "mode": "0644", 
    "owner": "jenkins", 
    "secontext": "system_u:object_r:var_lib_t:s0", 
    "size": 0, 
    "state": "file", 
    "uid": 997
}

TASK [aws-up : update the SSH configuration] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:154
changed: [localhost] => (item=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config) => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 12:03:29.644752", 
    "item": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/.ssh_config", 
    "msg": "Block inserted"
}

TASK [aws-up : wait for SSH to be available] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/provision/roles/aws-up/tasks/main.yml:172
ok: [localhost] => {
    "changed": false, 
    "elapsed": 107, 
    "generated_timestamp": "2017-06-09 12:05:17.080754", 
    "path": null, 
    "port": 22, 
    "search_regex": null, 
    "state": "started"
}

PLAY RECAP *********************************************************************
localhost                  : ok=27   changed=13   unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PROVISION CLOUD RESOURCES [00h 02m 23s] ##########
[description-setter] Description set: <div>
Using the <a href="https://github.com/openshift/origin-aggregated-logging/tree/master">origin-aggregated-logging master</a> branch.
</div>
[workspace] $ /bin/bash /tmp/hudson4012975398533934291.sh
########## STARTING STAGE: SYNC ORIGIN-AGGREGATED-LOGGING REPOSITORY ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
+ oct sync remote origin-aggregated-logging --branch master

PLAYBOOK: remote.yml ***********************************************************
2 plays in /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/remote.yml

PLAY [ensure we have the parameters necessary to sync from a remote repository] ***

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/remote.yml:9
skipping: [localhost] => (item=origin_ci_hosts)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:18.145856", 
    "item": "origin_ci_hosts", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_connection)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:18.148134", 
    "item": "origin_ci_connection", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_sync_repository)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:18.152571", 
    "item": "origin_ci_sync_repository", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_sync_remote)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:18.155666", 
    "item": "origin_ci_sync_remote", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_sync_version)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:18.160253", 
    "item": "origin_ci_sync_version", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [sync the remote host with code from the remote repository] ***************

TASK [Gathering Facts] *********************************************************
ok: [172.18.7.3]

TASK [remote-sync : ensure that we are given a destination or we can determine one] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:2
skipping: [172.18.7.3] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:39.141251", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [remote-sync : determine the directory to sync if no override is provided] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:7
ok: [172.18.7.3] => {
    "ansible_facts": {
        "origin_ci_sync_destination": "/data/src/github.com/openshift/origin-aggregated-logging"
    }, 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:39.175738"
}

TASK [remote-sync : determine which remotes already exist] *********************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:12
changed: [172.18.7.3] => {
    "changed": true, 
    "cmd": [
        "/usr/bin/git", 
        "remote"
    ], 
    "delta": "0:00:00.909842", 
    "end": "2017-06-09 12:05:40.884906", 
    "generated_timestamp": "2017-06-09 12:05:40.941398", 
    "rc": 0, 
    "start": "2017-06-09 12:05:39.975064", 
    "stderr": [], 
    "stdout": [
        "origin"
    ], 
    "warnings": []
}

TASK [remote-sync : fail if the remote we want doesn't exist and we can't add it] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:18
skipping: [172.18.7.3] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:40.971644", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [remote-sync : add the new remote if it doesn't exist and we have an address to point to] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:23
skipping: [172.18.7.3] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:40.994965", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [remote-sync : inspect the remote for the address if we do not have an address] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:29
changed: [172.18.7.3] => {
    "changed": true, 
    "cmd": "/usr/bin/git remote show origin | grep \"Fetch URL\" | cut -c 14-", 
    "delta": "0:00:08.020083", 
    "end": "2017-06-09 12:05:49.302571", 
    "generated_timestamp": "2017-06-09 12:05:49.364702", 
    "rc": 0, 
    "start": "2017-06-09 12:05:41.282488", 
    "stderr": [], 
    "stdout": [
        "https://github.com/openshift/origin-aggregated-logging.git"
    ], 
    "warnings": []
}

TASK [remote-sync : synchronize the repository with the remote server] *********
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:36
ok: [172.18.7.3] => {
    "after": "3a32de04bd757a3c0dca6c017be67827f263e667", 
    "before": "3a32de04bd757a3c0dca6c017be67827f263e667", 
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:56.193935", 
    "remote_url_changed": false, 
    "warnings": []
}

TASK [remote-sync : prune out any refs that should not exist on the remote] ****
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:45
changed: [172.18.7.3] => {
    "changed": true, 
    "cmd": "/usr/bin/git fetch origin --tags --prune", 
    "delta": "0:00:00.142015", 
    "end": "2017-06-09 12:05:56.638579", 
    "generated_timestamp": "2017-06-09 12:05:56.693783", 
    "rc": 0, 
    "start": "2017-06-09 12:05:56.496564", 
    "stderr": [], 
    "stdout": [], 
    "warnings": []
}

TASK [remote-sync : check out the desired post-merge state, if requested] ******
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:50
skipping: [172.18.7.3] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:56.721870", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

TASK [remote-sync : merge the resulting state into another branch, if requested] ***
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/sync/roles/remote-sync/tasks/main.yml:56
skipping: [172.18.7.3] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 12:05:56.748224", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY RECAP *********************************************************************
172.18.7.3                 : ok=6    changed=3    unreachable=0    failed=0   
localhost                  : ok=0    changed=0    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: SYNC ORIGIN-AGGREGATED-LOGGING REPOSITORY [00h 00m 39s] ##########
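The sync stage above is driven by the remote-sync Ansible role, but its effect on the remote host can be read straight from the task output. A rough bash equivalent, reconstructed from those tasks (a sketch only; the role's exact logic lives in remote-sync/tasks/main.yml):

    cd /data/src/github.com/openshift/origin-aggregated-logging   # destination determined by the play
    git remote                                                    # confirm an 'origin' remote already exists
    git remote show origin | grep "Fetch URL" | cut -c 14-        # discover the fetch URL of that remote
    git fetch origin --tags --prune                               # drop refs that no longer exist upstream
    # the actual update to the requested 'master' branch is done by Ansible's git module,
    # which reported before/after 3a32de04... (working copy already up to date)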
[workspace] $ /bin/bash /tmp/hudson8683389259126091833.sh
########## STARTING STAGE: USE A RAMDISK FOR ETCD ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ mktemp
+ script=/tmp/tmp.T8SHZ2hlly
+ cat
+ chmod +x /tmp/tmp.T8SHZ2hlly
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.T8SHZ2hlly openshiftdevel:/tmp/tmp.T8SHZ2hlly
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "/tmp/tmp.T8SHZ2hlly"'
+ cd /home/origin
+ sudo su root
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: USE A RAMDISK FOR ETCD [00h 00m 01s] ##########
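The body of the ramdisk script is not echoed above (only the surrounding cd and sudo su are traced). Purely as an illustration of the pattern the stage name describes, a hypothetical sketch; the mount point and size are assumptions, not taken from this log:

    # hypothetical sketch -- the real script contents are not shown in this log
    mkdir -p /tmp/etcd                              # assumed etcd data location
    mount -t tmpfs -o size=4096m tmpfs /tmp/etcd    # back it with a tmpfs ramdisk (run as root)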
[workspace] $ /bin/bash /tmp/hudson5135426572610814087.sh
########## STARTING STAGE: INSTALL ANSIBLE PLUGINS ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ mktemp
+ script=/tmp/tmp.7IhIOj96EA
+ cat
+ chmod +x /tmp/tmp.7IhIOj96EA
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.7IhIOj96EA openshiftdevel:/tmp/tmp.7IhIOj96EA
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "/tmp/tmp.7IhIOj96EA"'
+ cd /data/src/github.com/openshift/origin
+ sudo yum install -y python-pip
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Resolving Dependencies
--> Running transaction check
---> Package python2-pip.noarch 0:8.1.2-5.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package              Arch            Version               Repository     Size
================================================================================
Installing:
 python2-pip          noarch          8.1.2-5.el7           epel          1.7 M

Transaction Summary
================================================================================
Install  1 Package

Total download size: 1.7 M
Installed size: 7.2 M
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : python2-pip-8.1.2-5.el7.noarch                               1/1 
  Verifying  : python2-pip-8.1.2-5.el7.noarch                               1/1 

Installed:
  python2-pip.noarch 0:8.1.2-5.el7                                              

Complete!
+ sudo pip install junit_xml
Collecting junit_xml
  Downloading junit-xml-1.7.tar.gz
Requirement already satisfied (use --upgrade to upgrade): six in /usr/lib/python2.7/site-packages (from junit_xml)
Installing collected packages: junit-xml
  Running setup.py install for junit-xml: started
    Running setup.py install for junit-xml: finished with status 'done'
Successfully installed junit-xml-1.7
You are using pip version 8.1.2, however version 9.0.1 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
+ sudo chmod o+rw /etc/environment
++ pwd
+ echo ANSIBLE_JUNIT_DIR=/data/src/github.com/openshift/origin/_output/scripts/ansible_junit
+ sudo mkdir -p /usr/share/ansible/plugins/callback
+ for plugin in ''\''default_with_output_lists'\''' ''\''generate_junit'\'''
+ wget https://raw.githubusercontent.com/openshift/origin-ci-tool/master/oct/ansible/oct/callback_plugins/default_with_output_lists.py
--2017-06-09 12:08:18--  https://raw.githubusercontent.com/openshift/origin-ci-tool/master/oct/ansible/oct/callback_plugins/default_with_output_lists.py
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.32.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.32.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 1932 (1.9K) [text/plain]
Saving to: ‘default_with_output_lists.py’

     0K .                                                     100% 36.0M=0s

2017-06-09 12:08:18 (36.0 MB/s) - ‘default_with_output_lists.py’ saved [1932/1932]

+ sudo mv default_with_output_lists.py /usr/share/ansible/plugins/callback
+ for plugin in ''\''default_with_output_lists'\''' ''\''generate_junit'\'''
+ wget https://raw.githubusercontent.com/openshift/origin-ci-tool/master/oct/ansible/oct/callback_plugins/generate_junit.py
--2017-06-09 12:08:18--  https://raw.githubusercontent.com/openshift/origin-ci-tool/master/oct/ansible/oct/callback_plugins/generate_junit.py
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.32.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.32.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 11110 (11K) [text/plain]
Saving to: ‘generate_junit.py’

     0K ..........                                            100% 77.9M=0s

2017-06-09 12:08:18 (77.9 MB/s) - ‘generate_junit.py’ saved [11110/11110]

+ sudo mv generate_junit.py /usr/share/ansible/plugins/callback
+ sudo sed -r -i -e 's/^#?stdout_callback.*/stdout_callback = default_with_output_lists/' -e 's/^#?callback_whitelist.*/callback_whitelist = generate_junit/' /etc/ansible/ansible.cfg
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: INSTALL ANSIBLE PLUGINS [00h 02m 21s] ##########
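The two sed expressions at the end of this stage rewrite /etc/ansible/ansible.cfg so that Ansible uses the freshly downloaded callback plugins. A quick way to confirm the result (a verification sketch, not part of the job itself):

    grep -E '^(stdout_callback|callback_whitelist)' /etc/ansible/ansible.cfg
    # expected, given the two sed substitutions above:
    # stdout_callback = default_with_output_lists
    # callback_whitelist = generate_junit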
[workspace] $ /bin/bash /tmp/hudson7060002293852160571.sh
########## STARTING STAGE: RUN LOGGING TESTS ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ mktemp
+ script=/tmp/tmp.GtYHnEW4Dq
+ cat
+ chmod +x /tmp/tmp.GtYHnEW4Dq
+ scp -F ./.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.GtYHnEW4Dq openshiftdevel:/tmp/tmp.GtYHnEW4Dq
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "/tmp/tmp.GtYHnEW4Dq"'
+ cd /data/src/github.com/openshift/origin-aggregated-logging
++ realpath ./
+ export O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging
+ O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging
++ realpath ./../origin
+ export OS_ROOT=/data/src/github.com/openshift/origin
+ OS_ROOT=/data/src/github.com/openshift/origin
+ export PATH=/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin:/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/
+ PATH=/usr/lib64/qt-3.3/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/origin/.local/bin:/home/origin/bin:/data/src/github.com/openshift/origin/_output/local/bin/linux/amd64/
+ export ENABLE_OPS_CLUSTER=true
+ ENABLE_OPS_CLUSTER=true
+ export USE_LOCAL_SOURCE=true
+ USE_LOCAL_SOURCE=true
+ export VERBOSE=1
+ VERBOSE=1
+ pushd hack/testing
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing /data/src/github.com/openshift/origin-aggregated-logging
+ ./logging.sh
/data/src/github.com/openshift/origin /data/src/github.com/openshift/origin-aggregated-logging/hack/testing
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing
/data/src/github.com/openshift/origin-aggregated-logging /data/src/github.com/openshift/origin-aggregated-logging/hack/testing
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Metadata Cache Created
Loaded plugins: amazon-id, rhui-lb, search-disabled-repos
Package python2-pip-8.1.2-5.el7.noarch already installed and latest version
Package ansible-2.3.0.0-3.el7.noarch already installed and latest version
Resolving Dependencies
--> Running transaction check
---> Package python2-ruamel-yaml.x86_64 0:0.12.14-9.el7 will be installed
--> Processing Dependency: python2-typing for package: python2-ruamel-yaml-0.12.14-9.el7.x86_64
--> Processing Dependency: python2-ruamel-ordereddict for package: python2-ruamel-yaml-0.12.14-9.el7.x86_64
--> Running transaction check
---> Package python2-ruamel-ordereddict.x86_64 0:0.4.9-3.el7 will be installed
---> Package python2-typing.noarch 0:3.5.2.2-3.el7 will be installed
--> Finished Dependency Resolution

Dependencies Resolved

================================================================================
 Package                         Arch        Version            Repository
                                                                           Size
================================================================================
Installing:
 python2-ruamel-yaml             x86_64      0.12.14-9.el7      li        245 k
Installing for dependencies:
 python2-ruamel-ordereddict      x86_64      0.4.9-3.el7        li         38 k
 python2-typing                  noarch      3.5.2.2-3.el7      epel       39 k

Transaction Summary
================================================================================
Install  1 Package (+2 Dependent packages)

Total download size: 322 k
Installed size: 1.3 M
Downloading packages:
--------------------------------------------------------------------------------
Total                                              1.3 MB/s | 322 kB  00:00     
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
  Installing : python2-ruamel-ordereddict-0.4.9-3.el7.x86_64                1/3 
  Installing : python2-typing-3.5.2.2-3.el7.noarch                          2/3 
  Installing : python2-ruamel-yaml-0.12.14-9.el7.x86_64                     3/3 
  Verifying  : python2-ruamel-yaml-0.12.14-9.el7.x86_64                     1/3 
  Verifying  : python2-typing-3.5.2.2-3.el7.noarch                          2/3 
  Verifying  : python2-ruamel-ordereddict-0.4.9-3.el7.x86_64                3/3 

Installed:
  python2-ruamel-yaml.x86_64 0:0.12.14-9.el7                                    

Dependency Installed:
  python2-ruamel-ordereddict.x86_64 0:0.4.9-3.el7                               
  python2-typing.noarch 0:3.5.2.2-3.el7                                         

Complete!
Cloning into '/tmp/tmp.YFsh2WnHw8/openhift-ansible'...
Copying oc from path to /usr/local/bin for use by openshift-ansible
Copying oc from path to /usr/bin for use by openshift-ansible
Copying oadm from path to /usr/local/bin for use by openshift-ansible
Copying oadm from path to /usr/bin for use by openshift-ansible
[INFO] Starting logging tests at Fri Jun  9 12:10:26 EDT 2017
Generated new key pair as /tmp/openshift/origin-aggregated-logging/openshift.local.config/master/serviceaccounts.public.key and /tmp/openshift/origin-aggregated-logging/openshift.local.config/master/serviceaccounts.private.key
Generating node credentials ...
Created node config for 172.18.7.3 in /tmp/openshift/origin-aggregated-logging/openshift.local.config/node-172.18.7.3
Wrote master config to: /tmp/openshift/origin-aggregated-logging/openshift.local.config/master/master-config.yaml
Running hack/lib/start.sh:352: executing 'oc get --raw /healthz --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.25s until completion or 80.000s...
SUCCESS after 0.240s: hack/lib/start.sh:352: executing 'oc get --raw /healthz --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.25s until completion or 80.000s
Standard output from the command:
ok
There was no error output from the command.
Running hack/lib/start.sh:353: executing 'oc get --raw https://172.18.7.3:10250/healthz --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.5s until completion or 120.000s...
SUCCESS after 0.226s: hack/lib/start.sh:353: executing 'oc get --raw https://172.18.7.3:10250/healthz --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.5s until completion or 120.000s
Standard output from the command:
ok
There was no error output from the command.
Running hack/lib/start.sh:354: executing 'oc get --raw /healthz/ready --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.25s until completion or 80.000s...
SUCCESS after 0.224s: hack/lib/start.sh:354: executing 'oc get --raw /healthz/ready --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting any result and text 'ok'; re-trying every 0.25s until completion or 80.000s
Standard output from the command:
ok
There was no error output from the command.
Running hack/lib/start.sh:355: executing 'oc get service kubernetes --namespace default --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting success; re-trying every 0.25s until completion or 160.000s...
SUCCESS after 0.270s: hack/lib/start.sh:355: executing 'oc get service kubernetes --namespace default --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting success; re-trying every 0.25s until completion or 160.000s
Standard output from the command:
NAME         CLUSTER-IP   EXTERNAL-IP   PORT(S)                 AGE
kubernetes   172.30.0.1   <none>        443/TCP,53/UDP,53/TCP   7s

There was no error output from the command.
Running hack/lib/start.sh:356: executing 'oc get --raw /api/v1/nodes/172.18.7.3 --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting success; re-trying every 0.25s until completion or 80.000s...
SUCCESS after 0.225s: hack/lib/start.sh:356: executing 'oc get --raw /api/v1/nodes/172.18.7.3 --config='/tmp/openshift/origin-aggregated-logging/openshift.local.config/master/admin.kubeconfig'' expecting success; re-trying every 0.25s until completion or 80.000s
Standard output from the command:
{"kind":"Node","apiVersion":"v1","metadata":{"name":"172.18.7.3","selfLink":"/api/v1/nodes/172.18.7.3","uid":"2e56a673-4d2e-11e7-ae30-0e910886c5dc","resourceVersion":"819","creationTimestamp":"2017-06-09T16:10:39Z","labels":{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","kubernetes.io/hostname":"172.18.7.3"},"annotations":{"volumes.kubernetes.io/controller-managed-attach-detach":"true"}},"spec":{"externalID":"172.18.7.3","providerID":"aws:////i-0c2f17a3c842910cc"},"status":{"capacity":{"cpu":"4","memory":"16004804Ki","pods":"40"},"allocatable":{"cpu":"4","memory":"15902404Ki","pods":"40"},"conditions":[{"type":"OutOfDisk","status":"False","lastHeartbeatTime":"2017-06-09T16:10:39Z","lastTransitionTime":"2017-06-09T16:10:39Z","reason":"KubeletHasSufficientDisk","message":"kubelet has sufficient disk space available"},{"type":"MemoryPressure","status":"False","lastHeartbeatTime":"2017-06-09T16:10:39Z","lastTransitionTime":"2017-06-09T16:10:39Z","reason":"KubeletHasSufficientMemory","message":"kubelet has sufficient memory available"},{"type":"DiskPressure","status":"False","lastHeartbeatTime":"2017-06-09T16:10:39Z","lastTransitionTime":"2017-06-09T16:10:39Z","reason":"KubeletHasNoDiskPressure","message":"kubelet has no disk pressure"},{"type":"Ready","status":"False","lastHeartbeatTime":"2017-06-09T16:10:39Z","lastTransitionTime":"2017-06-09T16:10:39Z","reason":"KubeletNotReady","message":"container runtime is down"}],"addresses":[{"type":"LegacyHostIP","address":"172.18.7.3"},{"type":"InternalIP","address":"172.18.7.3"},{"type":"Hostname","address":"172.18.7.3"}],"daemonEndpoints":{"kubeletEndpoint":{"Port":10250}},"nodeInfo":{"machineID":"f9370ed252a14f73b014c1301a9b6d1b","systemUUID":"EC2BF994-37D2-F763-F920-620715B7E844","bootID":"e2c9c401-80c3-4618-89c1-eb89e7ba797a","kernelVersion":"3.10.0-514.21.1.el7.x86_64","osImage":"Red Hat Enterprise Linux Server 7.3 
(Maipo)","containerRuntimeVersion":"docker://1.12.6","kubeletVersion":"v1.6.1+5115d708d7","kubeProxyVersion":"v1.6.1+5115d708d7","operatingSystem":"linux","architecture":"amd64"},"images":[{"names":["openshift/origin-federation:1565480","openshift/origin-federation:latest"],"sizeBytes":1202835711},{"names":["openshift/origin-docker-registry:1565480","openshift/origin-docker-registry:latest"],"sizeBytes":1097491120},{"names":["openshift/origin-gitserver:1565480","openshift/origin-gitserver:latest"],"sizeBytes":1083915063},{"names":["openshift/openvswitch:1565480","openshift/openvswitch:latest"],"sizeBytes":1051376470},{"names":["openshift/node:1565480","openshift/node:latest"],"sizeBytes":1049694710},{"names":["openshift/origin-keepalived-ipfailover:1565480","openshift/origin-keepalived-ipfailover:latest"],"sizeBytes":1026502477},{"names":["openshift/origin-haproxy-router:1565480","openshift/origin-haproxy-router:latest"],"sizeBytes":1020731460},{"names":["openshift/origin-f5-router:1565480","openshift/origin-f5-router:latest"],"sizeBytes":999702306},{"names":["openshift/origin-deployer:1565480","openshift/origin-deployer:latest"],"sizeBytes":999702306},{"names":["openshift/origin:1565480","openshift/origin:latest"],"sizeBytes":999702306},{"names":["openshift/origin-sti-builder:1565480","openshift/origin-sti-builder:latest"],"sizeBytes":999702306},{"names":["openshift/origin-docker-builder:1565480","openshift/origin-docker-builder:latest"],"sizeBytes":999702306},{"names":["openshift/origin-recycler:1565480","openshift/origin-recycler:latest"],"sizeBytes":999702306},{"names":["openshift/origin-cluster-capacity:1565480","openshift/origin-cluster-capacity:latest"],"sizeBytes":960445554},{"names":["docker.io/openshift/origin-release@sha256:611f304562f9fed81fa5348ba39ffc3da008ab55eb8f8b18fdfaa598721958aa","docker.io/openshift/origin-release:golang-1.7"],"sizeBytes":852470564},{"names":["docker.io/openshift/origin-logging-auth-proxy:latest"],"sizeBytes":715535365},{"names":["docker.io/node@sha256:46db0dd19955beb87b841c30a6b9812ba626473283e84117d1c016deee5949a9","docker.io/node:0.10.36"],"sizeBytes":697128386},{"names":["docker.io/openshift/origin-logging-deployer:latest","docker.io/openshift/origin-logging-deployment:latest"],"sizeBytes":696821590},{"names":["docker.io/openshift/origin-logging-kibana:latest"],"sizeBytes":682851459},{"names":["docker.io/openshift/origin@sha256:6beee719d8e2555af8d0fc9d3d1bdd623f601417a21948cd4d6146f0cf1d1a46","docker.io/openshift/origin:v1.5.0-alpha.2"],"sizeBytes":526224196},{"names":["docker.io/openshift/origin-logging-elasticsearch:latest"],"sizeBytes":425567171},{"names":["openshift/origin-egress-http-proxy:1565480","openshift/origin-egress-http-proxy:latest"],"sizeBytes":396010589},{"names":["docker.io/openshift/base-centos7@sha256:aea292a3bddba020cde0ee83e6a45807931eb607c164ec6a3674f67039d8cd7c","docker.io/openshift/base-centos7:latest"],"sizeBytes":383049978},{"names":["openshift/origin-egress-router:1565480","openshift/origin-egress-router:latest"],"sizeBytes":364746296},{"names":["openshift/origin-base:latest"],"sizeBytes":363070636},{"names":["docker.io/openshift/origin-logging-fluentd:latest"],"sizeBytes":359225972},{"names":["docker.io/openshift/origin-logging-curator:latest"],"sizeBytes":224977240},{"names":["openshift/origin-pod:1565480","openshift/origin-pod:latest"],"sizeBytes":213198358},{"names":["openshift/origin-source:latest"],"sizeBytes":192548895},{"names":["docker.io/centos@sha256:aebf12af704307dfa0079b3babdca8d7e8ff6564696882bcb5d11f1d461f9e
e9","docker.io/centos:7","docker.io/centos:centos7"],"sizeBytes":192548537},{"names":["openshift/hello-openshift:1565480","openshift/hello-openshift:latest"],"sizeBytes":5635113}]}}

There was no error output from the command.
serviceaccount "registry" created
clusterrolebinding "registry-registry-role" created
deploymentconfig "docker-registry" created
service "docker-registry" created
info: password for stats user admin has been set to 6cwk26HCPV
--> Creating router router ...
    serviceaccount "router" created
    clusterrolebinding "router-router-role" created
    deploymentconfig "router" created
    service "router" created
--> Success
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:162: executing 'oadm new-project logging --node-selector=''' expecting success...
SUCCESS after 0.431s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:162: executing 'oadm new-project logging --node-selector=''' expecting success
Standard output from the command:
Created project logging

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:163: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.241s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:163: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
apiVersion: v1
items:
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-elasticsearch
      component: development
      logging-infra: development
      provider: openshift
    name: logging-elasticsearch
  spec: {}
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-fluentd
      component: development
      logging-infra: development
      provider: openshift
    name: logging-fluentd
  spec: {}
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-kibana
      component: development
      logging-infra: development
      provider: openshift
    name: logging-kibana
  spec: {}
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-curator
      component: development
      logging-infra: development
      provider: openshift
    name: logging-curator
  spec: {}
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-auth-proxy
      component: development
      logging-infra: development
      provider: openshift
    name: logging-auth-proxy
  spec: {}
- apiVersion: v1
  kind: ImageStream
  metadata:
    labels:
      build: logging-deployment
      component: development
      logging-infra: development
      provider: openshift
    name: origin
  spec:
    dockerImageRepository: openshift/origin
    tags:
    - from:
        kind: DockerImage
        name: openshift/origin:v1.5.0-alpha.2
      name: v1.5.0-alpha.2
- apiVersion: v1
  kind: BuildConfig
  metadata:
    labels:
      app: logging-elasticsearch
      component: development
      logging-infra: development
      provider: openshift
    name: logging-elasticsearch
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: logging-elasticsearch:latest
    resources: {}
    source:
      contextDir: elasticsearch
      git:
        ref: master
        uri: https://github.com/openshift/origin-aggregated-logging
      type: Git
    strategy:
      dockerStrategy:
        from:
          kind: DockerImage
          name: openshift/base-centos7
      type: Docker
- apiVersion: v1
  kind: BuildConfig
  metadata:
    labels:
      build: logging-fluentd
      component: development
      logging-infra: development
      provider: openshift
    name: logging-fluentd
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: logging-fluentd:latest
    resources: {}
    source:
      contextDir: fluentd
      git:
        ref: master
        uri: https://github.com/openshift/origin-aggregated-logging
      type: Git
    strategy:
      dockerStrategy:
        from:
          kind: DockerImage
          name: openshift/base-centos7
      type: Docker
- apiVersion: v1
  kind: BuildConfig
  metadata:
    labels:
      build: logging-kibana
      component: development
      logging-infra: development
      provider: openshift
    name: logging-kibana
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: logging-kibana:latest
    resources: {}
    source:
      contextDir: kibana
      git:
        ref: master
        uri: https://github.com/openshift/origin-aggregated-logging
      type: Git
    strategy:
      dockerStrategy:
        from:
          kind: DockerImage
          name: openshift/base-centos7
      type: Docker
- apiVersion: v1
  kind: BuildConfig
  metadata:
    labels:
      build: logging-curator
      component: development
      logging-infra: development
      provider: openshift
    name: logging-curator
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: logging-curator:latest
    resources: {}
    source:
      contextDir: curator
      git:
        ref: master
        uri: https://github.com/openshift/origin-aggregated-logging
      type: Git
    strategy:
      dockerStrategy:
        from:
          kind: DockerImage
          name: openshift/base-centos7
      type: Docker
- apiVersion: v1
  kind: BuildConfig
  metadata:
    labels:
      build: logging-auth-proxy
      component: development
      logging-infra: development
      provider: openshift
    name: logging-auth-proxy
  spec:
    output:
      to:
        kind: ImageStreamTag
        name: logging-auth-proxy:latest
    resources: {}
    source:
      contextDir: kibana-proxy
      git:
        ref: master
        uri: https://github.com/openshift/origin-aggregated-logging
      type: Git
    strategy:
      dockerStrategy:
        from:
          kind: DockerImage
          name: library/node:0.10.36
      type: Docker
kind: List
metadata: {}
Running hack/testing/build-images:31: executing 'oc process -o yaml    -f /data/src/github.com/openshift/origin-aggregated-logging/hack/templates/dev-builds-wo-deployer.yaml    -p LOGGING_FORK_URL=https://github.com/openshift/origin-aggregated-logging -p LOGGING_FORK_BRANCH=master    | build_filter | oc create -f -' expecting success...
SUCCESS after 0.397s: hack/testing/build-images:31: executing 'oc process -o yaml    -f /data/src/github.com/openshift/origin-aggregated-logging/hack/templates/dev-builds-wo-deployer.yaml    -p LOGGING_FORK_URL=https://github.com/openshift/origin-aggregated-logging -p LOGGING_FORK_BRANCH=master    | build_filter | oc create -f -' expecting success
Standard output from the command:
imagestream "logging-elasticsearch" created
imagestream "logging-fluentd" created
imagestream "logging-kibana" created
imagestream "logging-curator" created
imagestream "logging-auth-proxy" created
imagestream "origin" created
buildconfig "logging-elasticsearch" created
buildconfig "logging-fluentd" created
buildconfig "logging-kibana" created
buildconfig "logging-curator" created
buildconfig "logging-auth-proxy" created

There was no error output from the command.
Running hack/testing/build-images:9: executing 'oc get imagestreamtag origin:latest' expecting success; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 1.121s: hack/testing/build-images:9: executing 'oc get imagestreamtag origin:latest' expecting success; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
NAME            DOCKER REF                                                                                 UPDATED                  IMAGENAME
origin:latest   openshift/origin@sha256:4019c4653c0ce067bc0d92b83a9e125c9526bae3c0f4645225b298b460c07a19   Less than a second ago   sha256:4019c4653c0ce067bc0d92b83a9e125c9526bae3c0f4645225b298b460c07a19
Standard error from the command:
Error from server (NotFound): imagestreamtags.image.openshift.io "origin:latest" not found
... repeated 2 times
Uploading directory "/data/src/github.com/openshift/origin-aggregated-logging" as binary input for the build ...
build "logging-auth-proxy-1" started
Uploading directory "/data/src/github.com/openshift/origin-aggregated-logging" as binary input for the build ...
build "logging-curator-1" started
Uploading directory "/data/src/github.com/openshift/origin-aggregated-logging" as binary input for the build ...
build "logging-elasticsearch-1" started
Uploading directory "/data/src/github.com/openshift/origin-aggregated-logging" as binary input for the build ...
build "logging-fluentd-1" started
Uploading directory "/data/src/github.com/openshift/origin-aggregated-logging" as binary input for the build ...
build "logging-kibana-1" started
Running hack/testing/build-images:33: executing 'wait_for_builds_complete' expecting success...
SUCCESS after 60.763s: hack/testing/build-images:33: executing 'wait_for_builds_complete' expecting success
Standard output from the command:
Builds are complete

There was no error output from the command.
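wait_for_builds_complete is a helper from hack/testing/build-images; the five binary builds started above can also be inspected directly with standard oc commands (a sketch, not output captured in this log):

    oc get builds -n logging                      # expect logging-*-1 builds with STATUS Complete
    oc logs build/logging-fluentd-1 -n logging    # per-build logs, useful if one of them fails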
/tmp/tmp.YFsh2WnHw8/openhift-ansible /data/src/github.com/openshift/origin-aggregated-logging
### Created host inventory file ###
[oo_first_master]
openshift

[oo_first_master:vars]
ansible_become=true
ansible_connection=local
containerized=true
docker_protect_installed_version=true
openshift_deployment_type=origin
deployment_type=origin
required_packages=[]


openshift_hosted_logging_hostname=kibana.127.0.0.1.xip.io
openshift_master_logging_public_url=https://kibana.127.0.0.1.xip.io
openshift_logging_master_public_url=https://172.18.7.3:8443

openshift_logging_image_prefix=172.30.197.120:5000/logging/
openshift_logging_use_ops=true

openshift_logging_fluentd_journal_read_from_head=False
openshift_logging_es_log_appenders=['console']
openshift_logging_use_mux=false
openshift_logging_mux_allow_external=false
openshift_logging_use_mux_client=false





###################################
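The inventory above is handed to openshift-ansible for the logging deployment. The wrapper command itself is not echoed, but based on the config file and playbook paths reported a few lines below, the invocation is roughly of this form (the inventory file path is a placeholder, not taken from this log):

    cd /tmp/tmp.YFsh2WnHw8/openhift-ansible        # so ./ansible.cfg is picked up as the config file
    ansible-playbook -i <generated-inventory-file> \
        playbooks/byo/openshift-cluster/openshift-logging.yml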
Running hack/testing/init-log-stack:58: executing 'oc login -u system:admin' expecting success...
SUCCESS after 0.257s: hack/testing/init-log-stack:58: executing 'oc login -u system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.7.3:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Using /tmp/tmp.YFsh2WnHw8/openhift-ansible/ansible.cfg as config file

PLAYBOOK: openshift-logging.yml ************************************************
4 plays in /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/byo/openshift-cluster/openshift-logging.yml

PLAY [Create initial host groups for localhost] ********************************
META: ran handlers

TASK [include_vars] ************************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/byo/openshift-cluster/initialize_groups.yml:10
ok: [localhost] => {
    "ansible_facts": {
        "g_all_hosts": "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) | default([]) }}", 
        "g_etcd_hosts": "{{ groups.etcd | default([]) }}", 
        "g_glusterfs_hosts": "{{ groups.glusterfs | default([]) }}", 
        "g_glusterfs_registry_hosts": "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}", 
        "g_lb_hosts": "{{ groups.lb | default([]) }}", 
        "g_master_hosts": "{{ groups.masters | default([]) }}", 
        "g_new_master_hosts": "{{ groups.new_masters | default([]) }}", 
        "g_new_node_hosts": "{{ groups.new_nodes | default([]) }}", 
        "g_nfs_hosts": "{{ groups.nfs | default([]) }}", 
        "g_node_hosts": "{{ groups.nodes | default([]) }}"
    }, 
    "changed": false
}
META: ran handlers
META: ran handlers

PLAY [Populate config host groups] *********************************************
META: ran handlers

TASK [Evaluate groups - g_etcd_hosts required] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:8
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_master_hosts or g_new_master_hosts required] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:13
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_node_hosts or g_new_node_hosts required] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:18
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_lb_hosts required] ***********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:23
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_nfs_hosts required] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:28
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_nfs_hosts is single host] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:33
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate groups - g_glusterfs_hosts required] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:38
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate oo_all_hosts] ***************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:43

TASK [Evaluate oo_masters] *****************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:52

TASK [Evaluate oo_first_master] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:61
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate oo_masters_to_config] *******************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:70

TASK [Evaluate oo_etcd_to_config] **********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:79

TASK [Evaluate oo_first_etcd] **************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:88
skipping: [localhost] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [Evaluate oo_etcd_hosts_to_upgrade] ***************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:100

TASK [Evaluate oo_etcd_hosts_to_backup] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:107
creating host via 'add_host': hostname=openshift
ok: [localhost] => (item=openshift) => {
    "add_host": {
        "groups": [
            "oo_etcd_hosts_to_backup"
        ], 
        "host_name": "openshift", 
        "host_vars": {}
    }, 
    "changed": false, 
    "item": "openshift"
}

TASK [Evaluate oo_nodes_to_config] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:114

TASK [Add master to oo_nodes_to_config] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:124

TASK [Evaluate oo_lb_to_config] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:134

TASK [Evaluate oo_nfs_to_config] ***********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:143

TASK [Evaluate oo_glusterfs_to_config] *****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/playbooks/common/openshift-cluster/evaluate_groups.yml:152
META: ran handlers
META: ran handlers

PLAY [OpenShift Aggregated Logging] ********************************************

TASK [Gathering Facts] *********************************************************
ok: [openshift]
META: ran handlers

TASK [openshift_sanitize_inventory : Abort when conflicting deployment type variables are set] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_sanitize_inventory/tasks/main.yml:2
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_sanitize_inventory : Standardize on latest variable names] *****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_sanitize_inventory/tasks/main.yml:15
ok: [openshift] => {
    "ansible_facts": {
        "deployment_type": "origin", 
        "openshift_deployment_type": "origin"
    }, 
    "changed": false
}

TASK [openshift_sanitize_inventory : Abort when deployment type is invalid] ****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_sanitize_inventory/tasks/main.yml:23
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_sanitize_inventory : Normalize openshift_release] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_sanitize_inventory/tasks/main.yml:31
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_sanitize_inventory : Abort when openshift_release is invalid] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_sanitize_inventory/tasks/main.yml:41
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : Detecting Operating System] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_facts : set_fact] **********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:8
ok: [openshift] => {
    "ansible_facts": {
        "l_is_atomic": false
    }, 
    "changed": false
}

TASK [openshift_facts : set_fact] **********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:10
ok: [openshift] => {
    "ansible_facts": {
        "l_is_containerized": true, 
        "l_is_etcd_system_container": false, 
        "l_is_master_system_container": false, 
        "l_is_node_system_container": false, 
        "l_is_openvswitch_system_container": false
    }, 
    "changed": false
}

TASK [openshift_facts : set_fact] **********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:16
ok: [openshift] => {
    "ansible_facts": {
        "l_any_system_container": false
    }, 
    "changed": false
}

TASK [openshift_facts : set_fact] **********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:18
ok: [openshift] => {
    "ansible_facts": {
        "l_etcd_runtime": "docker"
    }, 
    "changed": false
}

TASK [openshift_facts : Validate python version] *******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:22
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : Validate python version] *******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:29
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : Determine Atomic Host Docker Version] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:42
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : assert] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:46
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : Load variables] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:53
ok: [openshift] => (item=/tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/vars/default.yml) => {
    "ansible_facts": {
        "required_packages": [
            "iproute", 
            "python-dbus", 
            "PyYAML", 
            "yum-utils"
        ]
    }, 
    "item": "/tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/vars/default.yml"
}

TASK [openshift_facts : Ensure various deps are installed] *********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:59
ok: [openshift] => (item=iproute) => {
    "changed": false, 
    "item": "iproute", 
    "rc": 0, 
    "results": [
        "iproute-3.10.0-74.el7.x86_64 providing iproute is already installed"
    ]
}
ok: [openshift] => (item=python-dbus) => {
    "changed": false, 
    "item": "python-dbus", 
    "rc": 0, 
    "results": [
        "dbus-python-1.1.1-9.el7.x86_64 providing python-dbus is already installed"
    ]
}
ok: [openshift] => (item=PyYAML) => {
    "changed": false, 
    "item": "PyYAML", 
    "rc": 0, 
    "results": [
        "PyYAML-3.10-11.el7.x86_64 providing PyYAML is already installed"
    ]
}
ok: [openshift] => (item=yum-utils) => {
    "changed": false, 
    "item": "yum-utils", 
    "rc": 0, 
    "results": [
        "yum-utils-1.1.31-40.el7.noarch providing yum-utils is already installed"
    ]
}

TASK [openshift_facts : Ensure various deps for running system containers are installed] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:64
skipping: [openshift] => (item=atomic)  => {
    "changed": false, 
    "item": "atomic", 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}
skipping: [openshift] => (item=ostree)  => {
    "changed": false, 
    "item": "ostree", 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}
skipping: [openshift] => (item=runc)  => {
    "changed": false, 
    "item": "runc", 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_facts : Gather Cluster facts and set is_containerized if needed] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:71
changed: [openshift] => {
    "ansible_facts": {
        "openshift": {
            "common": {
                "admin_binary": "/usr/local/bin/oadm", 
                "all_hostnames": [
                    "ip-172-18-7-3.ec2.internal", 
                    "172.18.7.3", 
                    "ec2-34-207-254-240.compute-1.amazonaws.com", 
                    "34.207.254.240"
                ], 
                "cli_image": "openshift/origin", 
                "client_binary": "/usr/local/bin/oc", 
                "cluster_id": "default", 
                "config_base": "/etc/origin", 
                "data_dir": "/var/lib/origin", 
                "debug_level": "2", 
                "deployer_image": "openshift/origin-deployer", 
                "deployment_subtype": "basic", 
                "deployment_type": "origin", 
                "dns_domain": "cluster.local", 
                "etcd_runtime": "docker", 
                "examples_content_version": "v3.6", 
                "generate_no_proxy_hosts": true, 
                "hostname": "ip-172-18-7-3.ec2.internal", 
                "install_examples": true, 
                "internal_hostnames": [
                    "ip-172-18-7-3.ec2.internal", 
                    "172.18.7.3"
                ], 
                "ip": "172.18.7.3", 
                "is_atomic": false, 
                "is_containerized": true, 
                "is_etcd_system_container": false, 
                "is_master_system_container": false, 
                "is_node_system_container": false, 
                "is_openvswitch_system_container": false, 
                "kube_svc_ip": "172.30.0.1", 
                "pod_image": "openshift/origin-pod", 
                "portal_net": "172.30.0.0/16", 
                "public_hostname": "ec2-34-207-254-240.compute-1.amazonaws.com", 
                "public_ip": "34.207.254.240", 
                "registry_image": "openshift/origin-docker-registry", 
                "router_image": "openshift/origin-haproxy-router", 
                "sdn_network_plugin_name": "redhat/openshift-ovs-subnet", 
                "service_type": "origin", 
                "use_calico": false, 
                "use_contiv": false, 
                "use_dnsmasq": true, 
                "use_flannel": false, 
                "use_manageiq": true, 
                "use_nuage": false, 
                "use_openshift_sdn": true, 
                "version_gte_3_1_1_or_1_1_1": true, 
                "version_gte_3_1_or_1_1": true, 
                "version_gte_3_2_or_1_2": true, 
                "version_gte_3_3_or_1_3": true, 
                "version_gte_3_4_or_1_4": true, 
                "version_gte_3_5_or_1_5": true, 
                "version_gte_3_6": true
            }, 
            "current_config": {
                "roles": [
                    "node", 
                    "docker"
                ]
            }, 
            "docker": {
                "api_version": 1.24, 
                "disable_push_dockerhub": false, 
                "gte_1_10": true, 
                "options": "--log-driver=journald", 
                "service_name": "docker", 
                "version": "1.12.6"
            }, 
            "hosted": {
                "logging": {
                    "selector": null
                }, 
                "metrics": {
                    "selector": null
                }, 
                "registry": {
                    "selector": "region=infra"
                }, 
                "router": {
                    "selector": "region=infra"
                }
            }, 
            "node": {
                "annotations": {}, 
                "iptables_sync_period": "30s", 
                "kubelet_args": {
                    "node-labels": []
                }, 
                "labels": {}, 
                "local_quota_per_fsgroup": "", 
                "node_image": "openshift/node", 
                "node_system_image": "openshift/node", 
                "nodename": "ip-172-18-7-3.ec2.internal", 
                "ovs_image": "openshift/openvswitch", 
                "ovs_system_image": "openshift/openvswitch", 
                "registry_url": "openshift/origin-${component}:${version}", 
                "schedulable": true, 
                "sdn_mtu": "8951", 
                "set_node_ip": false, 
                "storage_plugin_deps": [
                    "ceph", 
                    "glusterfs", 
                    "iscsi"
                ]
            }, 
            "provider": {
                "metadata": {
                    "ami-id": "ami-2494ca32", 
                    "ami-launch-index": "0", 
                    "ami-manifest-path": "(unknown)", 
                    "block-device-mapping": {
                        "ami": "/dev/sda1", 
                        "ebs14": "sdb", 
                        "root": "/dev/sda1"
                    }, 
                    "hostname": "ip-172-18-7-3.ec2.internal", 
                    "instance-action": "none", 
                    "instance-id": "i-0c2f17a3c842910cc", 
                    "instance-type": "m4.xlarge", 
                    "local-hostname": "ip-172-18-7-3.ec2.internal", 
                    "local-ipv4": "172.18.7.3", 
                    "mac": "0e:91:08:86:c5:dc", 
                    "metrics": {
                        "vhostmd": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
                    }, 
                    "network": {
                        "interfaces": {
                            "macs": {
                                "0e:91:08:86:c5:dc": {
                                    "device-number": "0", 
                                    "interface-id": "eni-043adfd9", 
                                    "ipv4-associations": {
                                        "34.207.254.240": "172.18.7.3"
                                    }, 
                                    "local-hostname": "ip-172-18-7-3.ec2.internal", 
                                    "local-ipv4s": "172.18.7.3", 
                                    "mac": "0e:91:08:86:c5:dc", 
                                    "owner-id": "531415883065", 
                                    "public-hostname": "ec2-34-207-254-240.compute-1.amazonaws.com", 
                                    "public-ipv4s": "34.207.254.240", 
                                    "security-group-ids": "sg-7e73221a", 
                                    "security-groups": "default", 
                                    "subnet-id": "subnet-cf57c596", 
                                    "subnet-ipv4-cidr-block": "172.18.0.0/20", 
                                    "vpc-id": "vpc-69705d0c", 
                                    "vpc-ipv4-cidr-block": "172.18.0.0/16", 
                                    "vpc-ipv4-cidr-blocks": "172.18.0.0/16"
                                }
                            }
                        }
                    }, 
                    "placement": {
                        "availability-zone": "us-east-1d"
                    }, 
                    "profile": "default-hvm", 
                    "public-hostname": "ec2-34-207-254-240.compute-1.amazonaws.com", 
                    "public-ipv4": "34.207.254.240", 
                    "public-keys/": "0=libra", 
                    "reservation-id": "r-06d321b46f7006656", 
                    "security-groups": "default", 
                    "services": {
                        "domain": "amazonaws.com", 
                        "partition": "aws"
                    }
                }, 
                "name": "aws", 
                "network": {
                    "hostname": "ip-172-18-7-3.ec2.internal", 
                    "interfaces": [
                        {
                            "ips": [
                                "172.18.7.3"
                            ], 
                            "network_id": "subnet-cf57c596", 
                            "network_type": "vpc", 
                            "public_ips": [
                                "34.207.254.240"
                            ]
                        }
                    ], 
                    "ip": "172.18.7.3", 
                    "ipv6_enabled": false, 
                    "public_hostname": "ec2-34-207-254-240.compute-1.amazonaws.com", 
                    "public_ip": "34.207.254.240"
                }, 
                "zone": "us-east-1d"
            }
        }
    }, 
    "changed": true
}

TASK [openshift_facts : Set repoquery command] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_facts/tasks/main.yml:99
ok: [openshift] => {
    "ansible_facts": {
        "repoquery_cmd": "repoquery --plugins"
    }, 
    "changed": false
}

TASK [openshift_logging : fail] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:2
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Set default image variables based on deployment_type] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:6
ok: [openshift] => (item=/tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/vars/default_images.yml) => {
    "ansible_facts": {
        "__openshift_logging_image_prefix": "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}", 
        "__openshift_logging_image_version": "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
    }, 
    "item": "/tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/vars/default_images.yml"
}

TASK [openshift_logging : Set logging image facts] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:12
ok: [openshift] => {
    "ansible_facts": {
        "openshift_logging_image_prefix": "172.30.197.120:5000/logging/", 
        "openshift_logging_image_version": "latest"
    }, 
    "changed": false
}
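
The two facts above are normally concatenated with a component name to form the full image reference pulled for each logging pod. A minimal sketch of that composition, assuming the conventional component name logging-fluentd (the component name is illustrative, not taken from this run):

  # Hypothetical example: compose an image reference from the facts above.
  IMAGE_PREFIX="172.30.197.120:5000/logging/"   # openshift_logging_image_prefix
  IMAGE_VERSION="latest"                        # openshift_logging_image_version
  echo "${IMAGE_PREFIX}logging-fluentd:${IMAGE_VERSION}"
  # -> 172.30.197.120:5000/logging/logging-fluentd:latest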

TASK [openshift_logging : Create temp directory for doing work in] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:17
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002626", 
    "end": "2017-06-09 12:41:39.019390", 
    "rc": 0, 
    "start": "2017-06-09 12:41:39.016764"
}

STDOUT:

/tmp/openshift-logging-ansible-bYxzV8

TASK [openshift_logging : debug] ***********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:24
ok: [openshift] => {
    "changed": false
}

MSG:

Created temp dir /tmp/openshift-logging-ansible-bYxzV8

TASK [openshift_logging : Create local temp directory for doing work in] *******
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:26
ok: [openshift -> 127.0.0.1] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002597", 
    "end": "2017-06-09 12:41:39.211238", 
    "rc": 0, 
    "start": "2017-06-09 12:41:39.208641"
}

STDOUT:

/tmp/openshift-logging-ansible-JLU047

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:33
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml for openshift

TASK [openshift_logging : Gather OpenShift Logging Facts] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:2
ok: [openshift] => {
    "ansible_facts": {
        "openshift_logging_facts": {
            "curator": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "curator_ops": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "elasticsearch": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "elasticsearch_ops": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "fluentd": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "kibana": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }, 
            "kibana_ops": {
                "clusterrolebindings": {}, 
                "configmaps": {}, 
                "daemonsets": {}, 
                "deploymentconfigs": {}, 
                "oauthclients": {}, 
                "pvcs": {}, 
                "rolebindings": {}, 
                "routes": {}, 
                "sccs": {}, 
                "secrets": {}, 
                "services": {}
            }
        }
    }, 
    "changed": false
}
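
All of the per-component inventories above are empty, which simply means no logging objects exist yet in the cluster. A rough manual equivalent of this fact gathering, assuming the oc client is logged in with access to the logging project:

  # List the object types the fact module inventories for the logging components.
  oc get deploymentconfigs,daemonsets,services,routes,configmaps,secrets,pvc \
      -n logging -o name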

TASK [openshift_logging : Set logging project] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:7
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get namespace logging -o json", 
        "results": {
            "apiVersion": "v1", 
            "kind": "Namespace", 
            "metadata": {
                "annotations": {
                    "openshift.io/description": "", 
                    "openshift.io/display-name": "", 
                    "openshift.io/node-selector": "", 
                    "openshift.io/sa.scc.mcs": "s0:c7,c4", 
                    "openshift.io/sa.scc.supplemental-groups": "1000050000/10000", 
                    "openshift.io/sa.scc.uid-range": "1000050000/10000"
                }, 
                "creationTimestamp": "2017-06-09T16:10:43Z", 
                "name": "logging", 
                "resourceVersion": "932", 
                "selfLink": "/api/v1/namespaces/logging", 
                "uid": "30bbfebb-4d2e-11e7-ae30-0e910886c5dc"
            }, 
            "spec": {
                "finalizers": [
                    "openshift.io/origin", 
                    "kubernetes"
                ]
            }, 
            "status": {
                "phase": "Active"
            }
        }, 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging : Labeling logging project] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:13

TASK [openshift_logging : Labeling logging project] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:26
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Create logging cert directory] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:39
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/etc/origin/logging", 
    "secontext": "unconfined_u:object_r:etc_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:47
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml for openshift

TASK [openshift_logging : Checking for ca.key] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:3
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for ca.crt] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:8
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for ca.serial.txt] **************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:13
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Generate certificates] *******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:18
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "/usr/local/bin/oc", 
        "adm", 
        "--config=/tmp/openshift-logging-ansible-bYxzV8/admin.kubeconfig", 
        "ca", 
        "create-signer-cert", 
        "--key=/etc/origin/logging/ca.key", 
        "--cert=/etc/origin/logging/ca.crt", 
        "--serial=/etc/origin/logging/ca.serial.txt", 
        "--name=logging-signer-test"
    ], 
    "delta": "0:00:00.358957", 
    "end": "2017-06-09 12:41:44.317092", 
    "rc": 0, 
    "start": "2017-06-09 12:41:43.958135"
}
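
The signer CA generated by the command above can be inspected to confirm its subject and validity window; a quick check, assuming openssl is on the PATH (not part of the playbook, shown only for reference):

  # Show the subject and validity dates of the newly created logging signer CA.
  openssl x509 -in /etc/origin/logging/ca.crt -noout -subject -dates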

TASK [openshift_logging : Checking for signing.conf] ***************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:29
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : template] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:34
changed: [openshift] => {
    "changed": true, 
    "checksum": "a5a1bda430be44f982fa9097778b7d35d2e42780", 
    "dest": "/etc/origin/logging/signing.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "449087446670073f2899aac33113350c", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 4263, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026504.51-234055112807475/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:39
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml for openshift
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml for openshift
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml for openshift

TASK [openshift_logging : Checking for kibana.crt] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for kibana.key] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Trying to discover server cert variable name for kibana] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Trying to discover the server key variable name for kibana] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:20
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating signed server cert and key for kibana] ******
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:28
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copying server key for kibana to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:40
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copying Server cert for kibana to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:50
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Checking for kibana-ops.crt] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for kibana-ops.key] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Trying to discover server cert variable name for kibana-ops] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Trying to discover the server key variable name for kibana-ops] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:20
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating signed server cert and key for kibana-ops] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:28
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copying server key for kibana-ops to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:40
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copying Server cert for kibana-ops to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:50
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Checking for kibana-internal.crt] ********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for kibana-internal.key] ********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Trying to discover server cert variable name for kibana-internal] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Trying to discover the server key variable name for kibana-internal] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:20
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating signed server cert and key for kibana-internal] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:28
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "/usr/local/bin/oc", 
        "adm", 
        "--config=/tmp/openshift-logging-ansible-bYxzV8/admin.kubeconfig", 
        "ca", 
        "create-server-cert", 
        "--key=/etc/origin/logging/kibana-internal.key", 
        "--cert=/etc/origin/logging/kibana-internal.crt", 
        "--hostnames=kibana, kibana-ops, kibana.127.0.0.1.xip.io, kibana-ops.router.default.svc.cluster.local", 
        "--signer-cert=/etc/origin/logging/ca.crt", 
        "--signer-key=/etc/origin/logging/ca.key", 
        "--signer-serial=/etc/origin/logging/ca.serial.txt"
    ], 
    "delta": "0:00:00.589291", 
    "end": "2017-06-09 12:41:46.972238", 
    "rc": 0, 
    "start": "2017-06-09 12:41:46.382947"
}
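
Because this server certificate carries several hostnames, it can be worth confirming the subjectAltName entries after generation; one way to do that, assuming openssl is available (a manual check, not a playbook step):

  # Print the SAN extension of the kibana-internal server certificate.
  openssl x509 -in /etc/origin/logging/kibana-internal.crt -noout -text \
      | grep -A1 'Subject Alternative Name'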

TASK [openshift_logging : Copying server key for kibana-internal to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:40
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copying Server cert for kibana-internal to generated certs directory] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/procure_server_certs.yaml:50
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:48
skipping: [openshift] => (item={u'procure_component': u'mux', u'hostnames': u'logging-mux, mux.router.default.svc.cluster.local'})  => {
    "cert_info": {
        "hostnames": "logging-mux, mux.router.default.svc.cluster.local", 
        "procure_component": "mux"
    }, 
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:56
skipping: [openshift] => (item={u'procure_component': u'mux'})  => {
    "changed": false, 
    "shared_key_info": {
        "procure_component": "mux"
    }, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:63
skipping: [openshift] => (item={u'procure_component': u'es', u'hostnames': u'es, es.router.default.svc.cluster.local'})  => {
    "cert_info": {
        "hostnames": "es, es.router.default.svc.cluster.local", 
        "procure_component": "es"
    }, 
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:71
skipping: [openshift] => (item={u'procure_component': u'es-ops', u'hostnames': u'es-ops, es-ops.router.default.svc.cluster.local'})  => {
    "cert_info": {
        "hostnames": "es-ops, es-ops.router.default.svc.cluster.local", 
        "procure_component": "es-ops"
    }, 
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Copy proxy TLS configuration file] *******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:81
changed: [openshift] => {
    "changed": true, 
    "checksum": "36991681e03970736a99be9f084773521c44db06", 
    "dest": "/etc/origin/logging/server-tls.json", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "2a954195add2b2fdde4ed09ff5c8e1c5", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 321, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026507.51-26908547379967/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Copy proxy TLS configuration file] *******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:86
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Checking for ca.db] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:91
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : copy] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:96
changed: [openshift] => {
    "changed": true, 
    "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709", 
    "dest": "/etc/origin/logging/ca.db", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "d41d8cd98f00b204e9800998ecf8427e", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 0, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026507.94-37240661479796/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Checking for ca.crt.srl] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:101
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : copy] ************************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:106
changed: [openshift] => {
    "changed": true, 
    "checksum": "da39a3ee5e6b4b0d3255bfef95601890afd80709", 
    "dest": "/etc/origin/logging/ca.crt.srl", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "d41d8cd98f00b204e9800998ecf8427e", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 0, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026508.33-169401548303386/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Generate PEM certs] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:111
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml for openshift
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml for openshift
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml for openshift
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml for openshift

TASK [openshift_logging : Checking for system.logging.fluentd.key] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for system.logging.fluentd.crt] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Creating cert req for system.logging.fluentd] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating cert req for system.logging.fluentd] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:22
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "req", 
        "-out", 
        "/etc/origin/logging/system.logging.fluentd.csr", 
        "-new", 
        "-newkey", 
        "rsa:2048", 
        "-keyout", 
        "/etc/origin/logging/system.logging.fluentd.key", 
        "-subj", 
        "/CN=system.logging.fluentd/OU=OpenShift/O=Logging", 
        "-days", 
        "712", 
        "-nodes"
    ], 
    "delta": "0:00:00.214305", 
    "end": "2017-06-09 12:41:49.463643", 
    "rc": 0, 
    "start": "2017-06-09 12:41:49.249338"
}

STDERR:

Generating a 2048 bit RSA private key
.......................................................+++
...............................+++
writing new private key to '/etc/origin/logging/system.logging.fluentd.key'
-----

TASK [openshift_logging : Sign cert request with CA for system.logging.fluentd] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:31
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "ca", 
        "-in", 
        "/etc/origin/logging/system.logging.fluentd.csr", 
        "-notext", 
        "-out", 
        "/etc/origin/logging/system.logging.fluentd.crt", 
        "-config", 
        "/etc/origin/logging/signing.conf", 
        "-extensions", 
        "v3_req", 
        "-batch", 
        "-extensions", 
        "server_ext"
    ], 
    "delta": "0:00:00.009212", 
    "end": "2017-06-09 12:41:49.614278", 
    "rc": 0, 
    "start": "2017-06-09 12:41:49.605066"
}

STDERR:

Using configuration from /etc/origin/logging/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 2 (0x2)
        Validity
            Not Before: Jun  9 16:41:49 2017 GMT
            Not After : Jun  9 16:41:49 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = system.logging.fluentd
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                58:AF:CA:76:E8:FE:97:54:A4:81:92:DC:BF:EA:DF:76:9F:F9:47:2E
            X509v3 Authority Key Identifier: 
                0.
Certificate is to be certified until Jun  9 16:41:49 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated
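
To confirm that the freshly signed client certificate chains back to the signer CA created earlier, a simple verification sketch (assuming openssl; this is not run by the playbook):

  # Verify the fluentd client certificate against the logging CA.
  openssl verify -CAfile /etc/origin/logging/ca.crt \
      /etc/origin/logging/system.logging.fluentd.crt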

TASK [openshift_logging : Checking for system.logging.kibana.key] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for system.logging.kibana.crt] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Creating cert req for system.logging.kibana] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating cert req for system.logging.kibana] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:22
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "req", 
        "-out", 
        "/etc/origin/logging/system.logging.kibana.csr", 
        "-new", 
        "-newkey", 
        "rsa:2048", 
        "-keyout", 
        "/etc/origin/logging/system.logging.kibana.key", 
        "-subj", 
        "/CN=system.logging.kibana/OU=OpenShift/O=Logging", 
        "-days", 
        "712", 
        "-nodes"
    ], 
    "delta": "0:00:00.031062", 
    "end": "2017-06-09 12:41:50.108231", 
    "rc": 0, 
    "start": "2017-06-09 12:41:50.077169"
}

STDERR:

Generating a 2048 bit RSA private key
..+++
...........+++
writing new private key to '/etc/origin/logging/system.logging.kibana.key'
-----

TASK [openshift_logging : Sign cert request with CA for system.logging.kibana] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:31
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "ca", 
        "-in", 
        "/etc/origin/logging/system.logging.kibana.csr", 
        "-notext", 
        "-out", 
        "/etc/origin/logging/system.logging.kibana.crt", 
        "-config", 
        "/etc/origin/logging/signing.conf", 
        "-extensions", 
        "v3_req", 
        "-batch", 
        "-extensions", 
        "server_ext"
    ], 
    "delta": "0:00:00.009489", 
    "end": "2017-06-09 12:41:50.265018", 
    "rc": 0, 
    "start": "2017-06-09 12:41:50.255529"
}

STDERR:

Using configuration from /etc/origin/logging/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 3 (0x3)
        Validity
            Not Before: Jun  9 16:41:50 2017 GMT
            Not After : Jun  9 16:41:50 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = system.logging.kibana
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                0B:24:B1:13:37:0F:9C:2F:B6:CE:D0:2E:2B:C6:AB:12:1D:22:68:A2
            X509v3 Authority Key Identifier: 
                0.
Certificate is to be certified until Jun  9 16:41:50 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated

TASK [openshift_logging : Checking for system.logging.curator.key] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for system.logging.curator.crt] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Creating cert req for system.logging.curator] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating cert req for system.logging.curator] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:22
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "req", 
        "-out", 
        "/etc/origin/logging/system.logging.curator.csr", 
        "-new", 
        "-newkey", 
        "rsa:2048", 
        "-keyout", 
        "/etc/origin/logging/system.logging.curator.key", 
        "-subj", 
        "/CN=system.logging.curator/OU=OpenShift/O=Logging", 
        "-days", 
        "712", 
        "-nodes"
    ], 
    "delta": "0:00:00.080961", 
    "end": "2017-06-09 12:41:50.805051", 
    "rc": 0, 
    "start": "2017-06-09 12:41:50.724090"
}

STDERR:

Generating a 2048 bit RSA private key
...........................+++
..................+++
writing new private key to '/etc/origin/logging/system.logging.curator.key'
-----

TASK [openshift_logging : Sign cert request with CA for system.logging.curator] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:31
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "ca", 
        "-in", 
        "/etc/origin/logging/system.logging.curator.csr", 
        "-notext", 
        "-out", 
        "/etc/origin/logging/system.logging.curator.crt", 
        "-config", 
        "/etc/origin/logging/signing.conf", 
        "-extensions", 
        "v3_req", 
        "-batch", 
        "-extensions", 
        "server_ext"
    ], 
    "delta": "0:00:00.009250", 
    "end": "2017-06-09 12:41:50.956667", 
    "rc": 0, 
    "start": "2017-06-09 12:41:50.947417"
}

STDERR:

Using configuration from /etc/origin/logging/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 4 (0x4)
        Validity
            Not Before: Jun  9 16:41:50 2017 GMT
            Not After : Jun  9 16:41:50 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = system.logging.curator
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                36:27:10:D8:50:83:A9:18:52:B6:20:76:CF:98:E4:9A:68:E0:EC:34
            X509v3 Authority Key Identifier: 
                0.
Certificate is to be certified until Jun  9 16:41:50 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated

TASK [openshift_logging : Checking for system.admin.key] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:2
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for system.admin.crt] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:7
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Creating cert req for system.admin] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating cert req for system.admin] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:22
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "req", 
        "-out", 
        "/etc/origin/logging/system.admin.csr", 
        "-new", 
        "-newkey", 
        "rsa:2048", 
        "-keyout", 
        "/etc/origin/logging/system.admin.key", 
        "-subj", 
        "/CN=system.admin/OU=OpenShift/O=Logging", 
        "-days", 
        "712", 
        "-nodes"
    ], 
    "delta": "0:00:00.065779", 
    "end": "2017-06-09 12:41:51.474865", 
    "rc": 0, 
    "start": "2017-06-09 12:41:51.409086"
}

STDERR:

Generating a 2048 bit RSA private key
..........+++
..........................+++
writing new private key to '/etc/origin/logging/system.admin.key'
-----

TASK [openshift_logging : Sign cert request with CA for system.admin] **********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_pems.yaml:31
changed: [openshift] => {
    "changed": true, 
    "cmd": [
        "openssl", 
        "ca", 
        "-in", 
        "/etc/origin/logging/system.admin.csr", 
        "-notext", 
        "-out", 
        "/etc/origin/logging/system.admin.crt", 
        "-config", 
        "/etc/origin/logging/signing.conf", 
        "-extensions", 
        "v3_req", 
        "-batch", 
        "-extensions", 
        "server_ext"
    ], 
    "delta": "0:00:00.009718", 
    "end": "2017-06-09 12:41:51.627446", 
    "rc": 0, 
    "start": "2017-06-09 12:41:51.617728"
}

STDERR:

Using configuration from /etc/origin/logging/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 5 (0x5)
        Validity
            Not Before: Jun  9 16:41:51 2017 GMT
            Not After : Jun  9 16:41:51 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = system.admin
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                A5:77:1B:8E:DC:12:58:FE:5A:3C:8A:94:6A:86:C7:31:A0:86:D7:82
            X509v3 Authority Key Identifier: 
                0.
Certificate is to be certified until Jun  9 16:41:51 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated
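
The key/certificate pairing produced here can be sanity-checked by comparing the public-key moduli of the private key and the issued certificate; a minimal sketch, assuming openssl (the key was generated with -nodes, so no passphrase prompt):

  # The two digests should match if the certificate was issued for this key.
  openssl x509 -noout -modulus -in /etc/origin/logging/system.admin.crt | openssl md5
  openssl rsa  -noout -modulus -in /etc/origin/logging/system.admin.key | openssl md5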

TASK [openshift_logging : Generate PEM cert for mux] ***************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:121
skipping: [openshift] => (item=system.logging.mux)  => {
    "changed": false, 
    "node_name": "system.logging.mux", 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Generate PEM cert for Elasticsearch external route] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:129
skipping: [openshift] => (item=system.logging.es)  => {
    "changed": false, 
    "node_name": "system.logging.es", 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Creating necessary JKS certs] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:137
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml for openshift

TASK [openshift_logging : Checking for elasticsearch.jks] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:3
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for logging-es.jks] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:8
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for system.admin.jks] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:13
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Checking for truststore.jks] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:18
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging : Create placeholder for previously created JKS certs to prevent recreating...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:23
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Create placeholder for previously created JKS certs to prevent recreating...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:28
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Create placeholder for previously created JKS certs to prevent recreating...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:33
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Create placeholder for previously created JKS certs to prevent recreating...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:38
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : pulling down signing items from host] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:43
changed: [openshift] => (item=ca.crt) => {
    "changed": true, 
    "checksum": "6685168ae90c4df135f8e786f6f0e84cbbd84059", 
    "dest": "/tmp/openshift-logging-ansible-JLU047/ca.crt", 
    "item": "ca.crt", 
    "md5sum": "10b5332b99792d282d75cc79b51a6bd5", 
    "remote_checksum": "6685168ae90c4df135f8e786f6f0e84cbbd84059", 
    "remote_md5sum": null
}
changed: [openshift] => (item=ca.key) => {
    "changed": true, 
    "checksum": "fcd0f659659fc91b8c8e16c3c774847331ddfa59", 
    "dest": "/tmp/openshift-logging-ansible-JLU047/ca.key", 
    "item": "ca.key", 
    "md5sum": "79fafcb151a4d2fe76981d9d7bf99f8f", 
    "remote_checksum": "fcd0f659659fc91b8c8e16c3c774847331ddfa59", 
    "remote_md5sum": null
}
changed: [openshift] => (item=ca.serial.txt) => {
    "changed": true, 
    "checksum": "b649682b92a811746098e5c91e891e5142a41950", 
    "dest": "/tmp/openshift-logging-ansible-JLU047/ca.serial.txt", 
    "item": "ca.serial.txt", 
    "md5sum": "76b01ce73ac53fdac1c67d27ac040473", 
    "remote_checksum": "b649682b92a811746098e5c91e891e5142a41950", 
    "remote_md5sum": null
}
ok: [openshift] => (item=ca.crl.srl) => {
    "changed": false, 
    "file": "/etc/origin/logging/ca.crl.srl", 
    "item": "ca.crl.srl"
}

MSG:

the remote file does not exist, not transferring, ignored
changed: [openshift] => (item=ca.db) => {
    "changed": true, 
    "checksum": "19867e4c13b7c573d2d4ba8227242c6095429636", 
    "dest": "/tmp/openshift-logging-ansible-JLU047/ca.db", 
    "item": "ca.db", 
    "md5sum": "a84bc904145b86bd89709519269d431c", 
    "remote_checksum": "19867e4c13b7c573d2d4ba8227242c6095429636", 
    "remote_md5sum": null
}

TASK [openshift_logging : template] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:56
changed: [openshift -> 127.0.0.1] => {
    "changed": true, 
    "checksum": "6a453857f54b299e437c27638ad322a668967354", 
    "dest": "/tmp/openshift-logging-ansible-JLU047/signing.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "a574e116514ffc94dd579c0238c6fea2", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 4281, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026513.42-8424965073934/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Run JKS generation script] ***************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:61
changed: [openshift -> 127.0.0.1] => {
    "changed": true, 
    "rc": 0
}

STDOUT:

Generating keystore and certificate for node system.admin
Generating certificate signing request for node system.admin
Sign certificate request with CA
Import back to keystore (including CA chain)
All done for system.admin
Generating keystore and certificate for node elasticsearch
Generating certificate signing request for node elasticsearch
Sign certificate request with CA
Import back to keystore (including CA chain)
All done for elasticsearch
Generating keystore and certificate for node logging-es
Generating certificate signing request for node logging-es
Sign certificate request with CA
Import back to keystore (including CA chain)
All done for logging-es
Import CA to truststore for validating client certs



STDERR:

+ '[' 2 -lt 1 ']'
+ dir=/tmp/openshift-logging-ansible-JLU047
+ SCRATCH_DIR=/tmp/openshift-logging-ansible-JLU047
+ PROJECT=logging
+ [[ ! -f /tmp/openshift-logging-ansible-JLU047/system.admin.jks ]]
+ generate_JKS_client_cert system.admin
+ NODE_NAME=system.admin
+ ks_pass=kspass
+ ts_pass=tspass
+ dir=/tmp/openshift-logging-ansible-JLU047
+ echo Generating keystore and certificate for node system.admin
+ keytool -genkey -alias system.admin -keystore /tmp/openshift-logging-ansible-JLU047/system.admin.jks -keyalg RSA -keysize 2048 -validity 712 -keypass kspass -storepass kspass -dname 'CN=system.admin, OU=OpenShift, O=Logging'
+ echo Generating certificate signing request for node system.admin
+ keytool -certreq -alias system.admin -keystore /tmp/openshift-logging-ansible-JLU047/system.admin.jks -file /tmp/openshift-logging-ansible-JLU047/system.admin.jks.csr -keyalg rsa -keypass kspass -storepass kspass -dname 'CN=system.admin, OU=OpenShift, O=Logging'
+ echo Sign certificate request with CA
+ openssl ca -in /tmp/openshift-logging-ansible-JLU047/system.admin.jks.csr -notext -out /tmp/openshift-logging-ansible-JLU047/system.admin.jks.crt -config /tmp/openshift-logging-ansible-JLU047/signing.conf -extensions v3_req -batch -extensions server_ext
Using configuration from /tmp/openshift-logging-ansible-JLU047/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 6 (0x6)
        Validity
            Not Before: Jun  9 16:42:03 2017 GMT
            Not After : Jun  9 16:42:03 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = system.admin
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                34:D7:51:1D:D9:4D:33:9C:3F:D2:39:8B:53:54:9C:3D:1D:D5:F3:AC
            X509v3 Authority Key Identifier: 
                0.
Certificate is to be certified until Jun  9 16:42:03 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated
+ echo 'Import back to keystore (including CA chain)'
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/ca.crt -keystore /tmp/openshift-logging-ansible-JLU047/system.admin.jks -storepass kspass -noprompt -alias sig-ca
Certificate was added to keystore
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/system.admin.jks.crt -keystore /tmp/openshift-logging-ansible-JLU047/system.admin.jks -storepass kspass -noprompt -alias system.admin
Certificate reply was installed in keystore
+ echo All done for system.admin
+ [[ ! -f /tmp/openshift-logging-ansible-JLU047/elasticsearch.jks ]]
++ join , logging-es logging-es-ops
++ local IFS=,
++ shift
++ echo logging-es,logging-es-ops
+ generate_JKS_chain true elasticsearch logging-es,logging-es-ops
+ dir=/tmp/openshift-logging-ansible-JLU047
+ ADD_OID=true
+ NODE_NAME=elasticsearch
+ CERT_NAMES=logging-es,logging-es-ops
+ ks_pass=kspass
+ ts_pass=tspass
+ rm -rf elasticsearch
+ extension_names=
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es-ops
+ '[' true = true ']'
+ extension_names=,dns:logging-es,dns:logging-es-ops,oid:1.2.3.4.5.5
+ echo Generating keystore and certificate for node elasticsearch
+ keytool -genkey -alias elasticsearch -keystore /tmp/openshift-logging-ansible-JLU047/elasticsearch.jks -keypass kspass -storepass kspass -keyalg RSA -keysize 2048 -validity 712 -dname 'CN=elasticsearch, OU=OpenShift, O=Logging' -ext san=dns:localhost,ip:127.0.0.1,dns:logging-es,dns:logging-es-ops,oid:1.2.3.4.5.5
+ echo Generating certificate signing request for node elasticsearch
+ keytool -certreq -alias elasticsearch -keystore /tmp/openshift-logging-ansible-JLU047/elasticsearch.jks -storepass kspass -file /tmp/openshift-logging-ansible-JLU047/elasticsearch.csr -keyalg rsa -dname 'CN=elasticsearch, OU=OpenShift, O=Logging' -ext san=dns:localhost,ip:127.0.0.1,dns:logging-es,dns:logging-es-ops,oid:1.2.3.4.5.5
+ echo Sign certificate request with CA
+ openssl ca -in /tmp/openshift-logging-ansible-JLU047/elasticsearch.csr -notext -out /tmp/openshift-logging-ansible-JLU047/elasticsearch.crt -config /tmp/openshift-logging-ansible-JLU047/signing.conf -extensions v3_req -batch -extensions server_ext
Using configuration from /tmp/openshift-logging-ansible-JLU047/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 7 (0x7)
        Validity
            Not Before: Jun  9 16:42:05 2017 GMT
            Not After : Jun  9 16:42:05 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = elasticsearch
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                6F:B8:CA:23:9C:19:75:D3:12:55:01:ED:A3:32:E7:6F:81:0B:1F:B4
            X509v3 Authority Key Identifier: 
                0.
            X509v3 Subject Alternative Name: 
                DNS:localhost, IP Address:127.0.0.1, DNS:logging-es, DNS:logging-es-ops, Registered ID:1.2.3.4.5.5
Certificate is to be certified until Jun  9 16:42:05 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated
+ echo 'Import back to keystore (including CA chain)'
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/ca.crt -keystore /tmp/openshift-logging-ansible-JLU047/elasticsearch.jks -storepass kspass -noprompt -alias sig-ca
Certificate was added to keystore
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/elasticsearch.crt -keystore /tmp/openshift-logging-ansible-JLU047/elasticsearch.jks -storepass kspass -noprompt -alias elasticsearch
Certificate reply was installed in keystore
+ echo All done for elasticsearch
+ [[ ! -f /tmp/openshift-logging-ansible-JLU047/logging-es.jks ]]
++ join , logging-es logging-es.logging.svc.cluster.local logging-es-cluster logging-es-cluster.logging.svc.cluster.local logging-es-ops logging-es-ops.logging.svc.cluster.local logging-es-ops-cluster logging-es-ops-cluster.logging.svc.cluster.local
++ local IFS=,
++ shift
++ echo logging-es,logging-es.logging.svc.cluster.local,logging-es-cluster,logging-es-cluster.logging.svc.cluster.local,logging-es-ops,logging-es-ops.logging.svc.cluster.local,logging-es-ops-cluster,logging-es-ops-cluster.logging.svc.cluster.local
+ generate_JKS_chain false logging-es logging-es,logging-es.logging.svc.cluster.local,logging-es-cluster,logging-es-cluster.logging.svc.cluster.local,logging-es-ops,logging-es-ops.logging.svc.cluster.local,logging-es-ops-cluster,logging-es-ops-cluster.logging.svc.cluster.local
+ dir=/tmp/openshift-logging-ansible-JLU047
+ ADD_OID=false
+ NODE_NAME=logging-es
+ CERT_NAMES=logging-es,logging-es.logging.svc.cluster.local,logging-es-cluster,logging-es-cluster.logging.svc.cluster.local,logging-es-ops,logging-es-ops.logging.svc.cluster.local,logging-es-ops-cluster,logging-es-ops-cluster.logging.svc.cluster.local
+ ks_pass=kspass
+ ts_pass=tspass
+ rm -rf logging-es
+ extension_names=
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops,dns:logging-es-ops.logging.svc.cluster.local
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops,dns:logging-es-ops.logging.svc.cluster.local,dns:logging-es-ops-cluster
+ for name in '${CERT_NAMES//,/ }'
+ extension_names=,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops,dns:logging-es-ops.logging.svc.cluster.local,dns:logging-es-ops-cluster,dns:logging-es-ops-cluster.logging.svc.cluster.local
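
The loop above only accumulates a string; an equivalent sketch using the same CERT_NAMES value (the leading comma is intentional, since the result is appended to the base san=dns:localhost,ip:127.0.0.1 list in the keytool calls below):

extension_names=""
for name in ${CERT_NAMES//,/ }; do
    extension_names="${extension_names},dns:${name}"
done
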
+ '[' false = true ']'
+ echo Generating keystore and certificate for node logging-es
+ keytool -genkey -alias logging-es -keystore /tmp/openshift-logging-ansible-JLU047/logging-es.jks -keypass kspass -storepass kspass -keyalg RSA -keysize 2048 -validity 712 -dname 'CN=logging-es, OU=OpenShift, O=Logging' -ext san=dns:localhost,ip:127.0.0.1,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops,dns:logging-es-ops.logging.svc.cluster.local,dns:logging-es-ops-cluster,dns:logging-es-ops-cluster.logging.svc.cluster.local
+ echo Generating certificate signing request for node logging-es
+ keytool -certreq -alias logging-es -keystore /tmp/openshift-logging-ansible-JLU047/logging-es.jks -storepass kspass -file /tmp/openshift-logging-ansible-JLU047/logging-es.csr -keyalg rsa -dname 'CN=logging-es, OU=OpenShift, O=Logging' -ext san=dns:localhost,ip:127.0.0.1,dns:logging-es,dns:logging-es.logging.svc.cluster.local,dns:logging-es-cluster,dns:logging-es-cluster.logging.svc.cluster.local,dns:logging-es-ops,dns:logging-es-ops.logging.svc.cluster.local,dns:logging-es-ops-cluster,dns:logging-es-ops-cluster.logging.svc.cluster.local
+ echo Sign certificate request with CA
+ openssl ca -in /tmp/openshift-logging-ansible-JLU047/logging-es.csr -notext -out /tmp/openshift-logging-ansible-JLU047/logging-es.crt -config /tmp/openshift-logging-ansible-JLU047/signing.conf -extensions v3_req -batch -extensions server_ext
Using configuration from /tmp/openshift-logging-ansible-JLU047/signing.conf
Check that the request matches the signature
Signature ok
Certificate Details:
        Serial Number: 8 (0x8)
        Validity
            Not Before: Jun  9 16:42:06 2017 GMT
            Not After : Jun  9 16:42:06 2019 GMT
        Subject:
            organizationName          = Logging
            organizationalUnitName    = OpenShift
            commonName                = logging-es
        X509v3 extensions:
            X509v3 Key Usage: critical
                Digital Signature, Key Encipherment
            X509v3 Basic Constraints: 
                CA:FALSE
            X509v3 Extended Key Usage: 
                TLS Web Server Authentication, TLS Web Client Authentication
            X509v3 Subject Key Identifier: 
                95:90:E8:EB:86:62:4C:49:BB:A2:FD:2F:F1:9D:50:D9:37:1C:CE:A9
            X509v3 Authority Key Identifier: 
                0.
            X509v3 Subject Alternative Name: 
                DNS:localhost, IP Address:127.0.0.1, DNS:logging-es, DNS:logging-es.logging.svc.cluster.local, DNS:logging-es-cluster, DNS:logging-es-cluster.logging.svc.cluster.local, DNS:logging-es-ops, DNS:logging-es-ops.logging.svc.cluster.local, DNS:logging-es-ops-cluster, DNS:logging-es-ops-cluster.logging.svc.cluster.local
Certificate is to be certified until Jun  9 16:42:06 2019 GMT (730 days)

Write out database with 1 new entries
Data Base Updated
+ echo 'Import back to keystore (including CA chain)'
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/ca.crt -keystore /tmp/openshift-logging-ansible-JLU047/logging-es.jks -storepass kspass -noprompt -alias sig-ca
Certificate was added to keystore
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/logging-es.crt -keystore /tmp/openshift-logging-ansible-JLU047/logging-es.jks -storepass kspass -noprompt -alias logging-es
Certificate reply was installed in keystore
+ echo All done for logging-es
+ '[' '!' -f /tmp/openshift-logging-ansible-JLU047/truststore.jks ']'
+ createTruststore
+ echo 'Import CA to truststore for validating client certs'
+ keytool -import -file /tmp/openshift-logging-ansible-JLU047/ca.crt -keystore /tmp/openshift-logging-ansible-JLU047/truststore.jks -storepass tspass -noprompt -alias sig-ca
Certificate was added to keystore
+ exit 0
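
The keystores and the signed certificate can be spot-checked on the host where this script ran (they are generated locally before being pushed to the remote); a minimal sketch, reusing the temp directory and store passwords from this run:

# The node keystore should hold the logging-es PrivateKeyEntry (with its signed chain) plus the sig-ca trusted entry.
keytool -list -keystore /tmp/openshift-logging-ansible-JLU047/logging-es.jks -storepass kspass
# The truststore should hold only the signing CA.
keytool -list -keystore /tmp/openshift-logging-ansible-JLU047/truststore.jks -storepass tspass
# The signed certificate should carry every DNS SAN assembled above.
openssl x509 -in /tmp/openshift-logging-ansible-JLU047/logging-es.crt -noout -text | grep -A1 'Subject Alternative Name'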


TASK [openshift_logging : Pushing locally generated JKS certs to remote host...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:66
changed: [openshift] => {
    "changed": true, 
    "checksum": "f898beb727c58ecd52e69e07f3defe14f438368c", 
    "dest": "/etc/origin/logging/elasticsearch.jks", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "ce88a5033d03fb4ad1c84b53eec34528", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 3767, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026527.28-52122902359624/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Pushing locally generated JKS certs to remote host...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:72
changed: [openshift] => {
    "changed": true, 
    "checksum": "c358ed720fa69ca0e9292029e2bccbf8a92825d8", 
    "dest": "/etc/origin/logging/logging-es.jks", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "45e62699bd2bc88eec260df0f84532cd", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 3982, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026527.54-85282567077454/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Pushing locally generated JKS certs to remote host...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:78
changed: [openshift] => {
    "changed": true, 
    "checksum": "9ef53265f6a7d7e40f4914da6fe3098c5431de86", 
    "dest": "/etc/origin/logging/system.admin.jks", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "a490c939c4f262f70e9daaa5016267f2", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 3702, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026527.8-175988106156792/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging : Pushing locally generated JKS certs to remote host...] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_jks.yaml:84
changed: [openshift] => {
    "changed": true, 
    "checksum": "52563f7ed03aba6545de2f9ab9f96c1094956d2c", 
    "dest": "/etc/origin/logging/truststore.jks", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "16b5e19cb07f9929be797e3ed5df82a6", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 797, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026528.07-163483054995984/source", 
    "state": "file", 
    "uid": 0
}
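
All four keystores now sit under /etc/origin/logging on the managed host, and the checksum fields reported above are SHA-1 digests; a minimal sketch for verifying the copies in place, run on the target host:

sha1sum /etc/origin/logging/elasticsearch.jks /etc/origin/logging/logging-es.jks \
        /etc/origin/logging/system.admin.jks /etc/origin/logging/truststore.jks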

TASK [openshift_logging : Generate proxy session] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:141
ok: [openshift] => {
    "ansible_facts": {
        "session_secret": "L5Ou3p6PHGg9O8ruO6MTwgRYuZlqDEiCTCetSDeMRtUvrP34LoDmgI5osGVjeMNp26y2jA58smwpwrc04HmUihC5nCwb9FGtPvPKGMKF5Y7JTIFlgAf8278oOEtOn8NW5KJwlcZmTfyWOm6UqiDbeJ79jId3rZ9z16AeqU1JUnCgGEcgFwN45gGZjq1QuTxmdYP44R8A"
    }, 
    "changed": false
}

TASK [openshift_logging : Generate oauth client secret] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/generate_certs.yaml:146
ok: [openshift] => {
    "ansible_facts": {
        "oauth_secret": "BkwNNIHFUDS99jXTGfXnec7kIbOyilS1foh99PYd1Hb6ljHzTOtkNgtObPDuYu11"
    }, 
    "changed": false
}

TASK [openshift_logging : set_fact] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:53

TASK [openshift_logging : set_fact] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:57
ok: [openshift] => {
    "ansible_facts": {
        "es_indices": "[]"
    }, 
    "changed": false
}

TASK [openshift_logging : set_fact] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:60
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:64

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:85
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml

TASK [openshift_logging_elasticsearch : Validate Elasticsearch cluster size] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:2
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Validate Elasticsearch Ops cluster size] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:6
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:10
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:14
ok: [openshift] => {
    "ansible_facts": {
        "elasticsearch_name": "logging-elasticsearch", 
        "es_component": "es"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "es_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : debug] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:11
ok: [openshift] => {
    "changed": false, 
    "openshift_logging_image_version": "latest"
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:14
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:17
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Create temp directory for doing work in] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:21
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002542", 
    "end": "2017-06-09 12:42:09.131308", 
    "rc": 0, 
    "start": "2017-06-09 12:42:09.128766"
}

STDOUT:

/tmp/openshift-logging-ansible-Wu7DF6

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:26
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-Wu7DF6"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : Create templates subdirectory] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:30
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-Wu7DF6/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Create ES service account] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:40
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Create ES service account] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:48
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-elasticsearch -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-elasticsearch-dockercfg-4q8s3"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:10Z", 
                    "name": "aggregated-logging-elasticsearch", 
                    "namespace": "logging", 
                    "resourceVersion": "1565", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-elasticsearch", 
                    "uid": "94f5cf46-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-elasticsearch-token-pzv44"
                    }, 
                    {
                        "name": "aggregated-logging-elasticsearch-dockercfg-4q8s3"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:57
changed: [openshift] => {
    "changed": true, 
    "checksum": "e5015364391ac609da8655a9a1224131599a5cea", 
    "dest": "/tmp/openshift-logging-ansible-Wu7DF6/rolebinding-reader.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "446fb96447527f48f97e69bb41bad7be", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 135, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026530.53-78960127776359/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Create rolebinding-reader role] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:61
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get clusterrole rolebinding-reader -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "ClusterRole", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:11Z", 
                    "name": "rolebinding-reader", 
                    "resourceVersion": "122", 
                    "selfLink": "/oapi/v1/clusterroles/rolebinding-reader", 
                    "uid": "95bf2a24-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "rules": [
                    {
                        "apiGroups": [
                            ""
                        ], 
                        "attributeRestrictions": null, 
                        "resources": [
                            "clusterrolebindings"
                        ], 
                        "verbs": [
                            "get"
                        ]
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set rolebinding-reader permissions for ES] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:72
changed: [openshift] => {
    "changed": true, 
    "present": "present", 
    "results": {
        "cmd": "/bin/oc adm policy add-cluster-role-to-user rolebinding-reader system:serviceaccount:logging:aggregated-logging-elasticsearch -n logging", 
        "results": "", 
        "returncode": 0
    }
}
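
add-cluster-role-to-user prints nothing on success, so the grant is easiest to confirm after the fact; a sketch, assuming this oc build includes the adm policy who-can subcommand:

# The ES service account should now appear among the principals allowed to read cluster role bindings.
oc adm policy who-can get clusterrolebindings | grep aggregated-logging-elasticsearch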

TASK [openshift_logging_elasticsearch : Generate logging-elasticsearch-view-role] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "d752c09323565f80ed14fa806d42284f0c5aef2a", 
    "dest": "/tmp/openshift-logging-ansible-Wu7DF6/logging-elasticsearch-view-role.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "8299dca2fb036c06ba7c4f620680e0f6", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 183, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026532.67-136441321499934/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Set logging-elasticsearch-view-role role] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:94
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get rolebinding logging-elasticsearch-view-role -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "groupNames": null, 
                "kind": "RoleBinding", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:13Z", 
                    "name": "logging-elasticsearch-view-role", 
                    "namespace": "logging", 
                    "resourceVersion": "961", 
                    "selfLink": "/oapi/v1/namespaces/logging/rolebindings/logging-elasticsearch-view-role", 
                    "uid": "97031943-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "roleRef": {
                    "name": "view"
                }, 
                "subjects": [
                    {
                        "kind": "ServiceAccount", 
                        "name": "aggregated-logging-elasticsearch", 
                        "namespace": "logging"
                    }
                ], 
                "userNames": [
                    "system:serviceaccount:logging:aggregated-logging-elasticsearch"
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : template] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:105
ok: [openshift] => {
    "changed": false, 
    "checksum": "f91458d5dad42c496e2081ef872777a6f6eb9ff9", 
    "dest": "/tmp/openshift-logging-ansible-Wu7DF6/elasticsearch-logging.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "e4be7c33c1927bbdd8c909bfbe3d9f0b", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2171, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026533.83-147199170790900/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : template] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:111
ok: [openshift] => {
    "changed": false, 
    "checksum": "6d4f976f6e77a6e0c8dca7e01fb5bedb68678b1d", 
    "dest": "/tmp/openshift-logging-ansible-Wu7DF6/elasticsearch.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "75abfd3a190832e593a8e5e7c5695e8e", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2454, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026534.11-192183441935023/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:121
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:127
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES configmap] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:133
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-elasticsearch -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "elasticsearch.yml": "cluster:\n  name: ${CLUSTER_NAME}\n\nscript:\n  inline: on\n  indexed: on\n\nindex:\n  number_of_shards: 1\n  number_of_replicas: 0\n  unassigned.node_left.delayed_timeout: 2m\n  translog:\n    flush_threshold_size: 256mb\n    flush_threshold_period: 5m\n\nnode:\n  master: ${IS_MASTER}\n  data: ${HAS_DATA}\n\nnetwork:\n  host: 0.0.0.0\n\ncloud:\n  kubernetes:\n    service: ${SERVICE_DNS}\n    namespace: ${NAMESPACE}\n\ndiscovery:\n  type: kubernetes\n  zen.ping.multicast.enabled: false\n  zen.minimum_master_nodes: ${NODE_QUORUM}\n\ngateway:\n  recover_after_nodes: ${NODE_QUORUM}\n  expected_nodes: ${RECOVER_EXPECTED_NODES}\n  recover_after_time: ${RECOVER_AFTER_TIME}\n\nio.fabric8.elasticsearch.authentication.users: [\"system.logging.kibana\", \"system.logging.fluentd\", \"system.logging.curator\", \"system.admin\"]\nio.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\nio.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\nio.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\n\nopenshift.config:\n  use_common_data_model: true\n  project_index_prefix: \"project\"\n  time_field_name: \"@timestamp\"\n\nopenshift.searchguard:\n  keystore.path: /etc/elasticsearch/secret/admin.jks\n  truststore.path: /etc/elasticsearch/secret/searchguard.truststore\n\nopenshift.operations.allow_cluster_reader: false\n\npath:\n  data: /elasticsearch/persistent/${CLUSTER_NAME}/data\n  logs: /elasticsearch/${CLUSTER_NAME}/logs\n  work: /elasticsearch/${CLUSTER_NAME}/work\n  scripts: /elasticsearch/${CLUSTER_NAME}/scripts\n\nsearchguard:\n  authcz.admin_dn:\n  - CN=system.admin,OU=OpenShift,O=Logging\n  config_index_name: \".searchguard.${HOSTNAME}\"\n  ssl:\n    transport:\n      enabled: true\n      enforce_hostname_verification: false\n      keystore_type: JKS\n      keystore_filepath: /etc/elasticsearch/secret/searchguard.key\n      keystore_password: kspass\n      truststore_type: JKS\n      truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore\n      truststore_password: tspass\n    http:\n      enabled: true\n      keystore_type: JKS\n      keystore_filepath: /etc/elasticsearch/secret/key\n      keystore_password: kspass\n      clientauth_mode: OPTIONAL\n      truststore_type: JKS\n      truststore_filepath: /etc/elasticsearch/secret/truststore\n      truststore_password: tspass\n", 
                    "logging.yml": "# you can override this using by setting a system property, for example -Des.logger.level=DEBUG\nes.logger.level: INFO\nrootLogger: ${es.logger.level}, console, file\nlogger:\n  # log action execution errors for easier debugging\n  action: WARN\n  # reduce the logging for aws, too much is logged under the default INFO\n  com.amazonaws: WARN\n  io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}\n  io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}\n\n  # gateway\n  #gateway: DEBUG\n  #index.gateway: DEBUG\n\n  # peer shard recovery\n  #indices.recovery: DEBUG\n\n  # discovery\n  #discovery: TRACE\n\n  index.search.slowlog: TRACE, index_search_slow_log_file\n  index.indexing.slowlog: TRACE, index_indexing_slow_log_file\n\n  # search-guard\n  com.floragunn.searchguard: WARN\n\nadditivity:\n  index.search.slowlog: false\n  index.indexing.slowlog: false\n\nappender:\n  console:\n    type: console\n    layout:\n      type: consolePattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.\n  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html\n  #file:\n    #type: extrasRollingFile\n    #file: ${path.logs}/${cluster.name}.log\n    #rollingPolicy: timeBased\n    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz\n    #layout:\n      #type: pattern\n      #conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  index_search_slow_log_file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}_index_search_slowlog.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  index_indexing_slow_log_file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:15Z", 
                    "name": "logging-elasticsearch", 
                    "namespace": "logging", 
                    "resourceVersion": "1573", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-elasticsearch", 
                    "uid": "97f55b55-4d32-11e7-ae30-0e910886c5dc"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
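
The configmap carries the full elasticsearch.yml and logging.yml rendered above; pulling it back out of the API is a quick way to diff it against the role templates. A sketch:

oc get configmap logging-elasticsearch -n logging -o yaml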

TASK [openshift_logging_elasticsearch : Set ES secret] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:144
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-elasticsearch key=/etc/origin/logging/logging-es.jks truststore=/etc/origin/logging/truststore.jks searchguard.key=/etc/origin/logging/elasticsearch.jks searchguard.truststore=/etc/origin/logging/truststore.jks admin-key=/etc/origin/logging/system.admin.key admin-cert=/etc/origin/logging/system.admin.crt admin-ca=/etc/origin/logging/ca.crt admin.jks=/etc/origin/logging/system.admin.jks -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}
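
The logging-elasticsearch secret gets one data key per file passed to oc secrets new above; listing the keys confirms nothing was dropped. A sketch (values stay base64-encoded and are not printed):

oc describe secret logging-elasticsearch -n logging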

TASK [openshift_logging_elasticsearch : Set logging-es-cluster service] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:168
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.11.253", 
        "cmd": "/bin/oc get service logging-es-cluster -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:17Z", 
                    "name": "logging-es-cluster", 
                    "namespace": "logging", 
                    "resourceVersion": "1579", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-es-cluster", 
                    "uid": "9948290d-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.11.253", 
                    "ports": [
                        {
                            "port": 9300, 
                            "protocol": "TCP", 
                            "targetPort": 9300
                        }
                    ], 
                    "selector": {
                        "component": "es", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set logging-es service] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:182
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.93.153", 
        "cmd": "/bin/oc get service logging-es -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:18Z", 
                    "name": "logging-es", 
                    "namespace": "logging", 
                    "resourceVersion": "1582", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-es", 
                    "uid": "99f8206d-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.93.153", 
                    "ports": [
                        {
                            "port": 9200, 
                            "protocol": "TCP", 
                            "targetPort": "restapi"
                        }
                    ], 
                    "selector": {
                        "component": "es", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
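
With logging-es-cluster (transport, port 9300) and logging-es (REST, port 9200) both created, a quick check that each received a ClusterIP and uses the component=es,provider=openshift selector; a sketch, assuming -o wide includes the selector column in this client version:

oc get svc logging-es logging-es-cluster -n logging -o wide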

TASK [openshift_logging_elasticsearch : Creating ES storage template] **********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:197
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Creating ES storage template] **********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:210
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES storage] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:225
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:237
ok: [openshift] => {
    "ansible_facts": {
        "es_deploy_name": "logging-es-data-master-oawpjydu"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:241
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES dc templates] *******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:246
changed: [openshift] => {
    "changed": true, 
    "checksum": "740ed3b16926ad5a1627d072df51d1795e764a93", 
    "dest": "/tmp/openshift-logging-ansible-Wu7DF6/templates/logging-es-dc.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "772883795badd56ce0d88e4f6a43fe7a", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 3139, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026539.02-233108976157135/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Set ES dc] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:262
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-es-data-master-oawpjydu -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:19Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "es", 
                        "deployment": "logging-es-data-master-oawpjydu", 
                        "logging-infra": "elasticsearch", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-es-data-master-oawpjydu", 
                    "namespace": "logging", 
                    "resourceVersion": "1596", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-es-data-master-oawpjydu", 
                    "uid": "9acad417-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "es", 
                        "deployment": "logging-es-data-master-oawpjydu", 
                        "logging-infra": "elasticsearch", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "es", 
                                "deployment": "logging-es-data-master-oawpjydu", 
                                "logging-infra": "elasticsearch", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-es-data-master-oawpjydu"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "NAMESPACE", 
                                            "valueFrom": {
                                                "fieldRef": {
                                                    "apiVersion": "v1", 
                                                    "fieldPath": "metadata.namespace"
                                                }
                                            }
                                        }, 
                                        {
                                            "name": "KUBERNETES_TRUST_CERT", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "SERVICE_DNS", 
                                            "value": "logging-es-cluster"
                                        }, 
                                        {
                                            "name": "CLUSTER_NAME", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "INSTANCE_RAM", 
                                            "value": "8Gi"
                                        }, 
                                        {
                                            "name": "NODE_QUORUM", 
                                            "value": "1"
                                        }, 
                                        {
                                            "name": "RECOVER_EXPECTED_NODES", 
                                            "value": "1"
                                        }, 
                                        {
                                            "name": "RECOVER_AFTER_TIME", 
                                            "value": "5m"
                                        }, 
                                        {
                                            "name": "READINESS_PROBE_TIMEOUT", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "IS_MASTER", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "HAS_DATA", 
                                            "value": "true"
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-elasticsearch:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "elasticsearch", 
                                    "ports": [
                                        {
                                            "containerPort": 9200, 
                                            "name": "restapi", 
                                            "protocol": "TCP"
                                        }, 
                                        {
                                            "containerPort": 9300, 
                                            "name": "cluster", 
                                            "protocol": "TCP"
                                        }
                                    ], 
                                    "readinessProbe": {
                                        "exec": {
                                            "command": [
                                                "/usr/share/elasticsearch/probe/readiness.sh"
                                            ]
                                        }, 
                                        "failureThreshold": 3, 
                                        "initialDelaySeconds": 10, 
                                        "periodSeconds": 5, 
                                        "successThreshold": 1, 
                                        "timeoutSeconds": 30
                                    }, 
                                    "resources": {
                                        "limits": {
                                            "cpu": "1", 
                                            "memory": "8Gi"
                                        }, 
                                        "requests": {
                                            "memory": "512Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/elasticsearch/secret", 
                                            "name": "elasticsearch", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/usr/share/java/elasticsearch/config", 
                                            "name": "elasticsearch-config", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/elasticsearch/persistent", 
                                            "name": "elasticsearch-storage"
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {
                                "supplementalGroups": [
                                    65534
                                ]
                            }, 
                            "serviceAccount": "aggregated-logging-elasticsearch", 
                            "serviceAccountName": "aggregated-logging-elasticsearch", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "elasticsearch", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-elasticsearch"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-elasticsearch"
                                    }, 
                                    "name": "elasticsearch-config"
                                }, 
                                {
                                    "emptyDir": {}, 
                                    "name": "elasticsearch-storage"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:42:19Z", 
                            "lastUpdateTime": "2017-06-09T16:42:19Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:42:19Z", 
                            "lastUpdateTime": "2017-06-09T16:42:19Z", 
                            "message": "replication controller \"logging-es-data-master-oawpjydu-1\" is waiting for pod \"logging-es-data-master-oawpjydu-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
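
The deployment config is created but not yet available (availableReplicas: 0) while the deployer pod starts; a minimal sketch for following the first rollout from the CLI, using the generated dc name from this run and assuming this oc build supports rollout status for deployment configs:

oc rollout status dc/logging-es-data-master-oawpjydu -n logging
# Once the deployer finishes, the ES pod should report Ready after its readiness probe passes.
oc get pods -n logging -l component=es,provider=openshift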

TASK [openshift_logging_elasticsearch : Delete temp directory] *****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:274
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-Wu7DF6", 
    "state": "absent"
}

TASK [openshift_logging : set_fact] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:99

TASK [openshift_logging : set_fact] ********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:105
ok: [openshift] => {
    "ansible_facts": {
        "es_ops_indices": "[]"
    }, 
    "changed": false
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:109

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:132
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml

TASK [openshift_logging_elasticsearch : Validate Elasticsearch cluster size] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:2
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Validate Elasticsearch Ops cluster size] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:6
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:10
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:14
ok: [openshift] => {
    "ansible_facts": {
        "elasticsearch_name": "logging-elasticsearch-ops", 
        "es_component": "es-ops"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "es_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : debug] *********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:11
ok: [openshift] => {
    "changed": false, 
    "openshift_logging_image_version": "latest"
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:14
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : fail] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml:17
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Create temp directory for doing work in] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:21
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002532", 
    "end": "2017-06-09 12:42:21.379994", 
    "rc": 0, 
    "start": "2017-06-09 12:42:21.377462"
}

STDOUT:

/tmp/openshift-logging-ansible-3uEmM9

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:26
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-3uEmM9"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : Create templates subdirectory] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:30
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-3uEmM9/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Create ES service account] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:40
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Create ES service account] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:48
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-elasticsearch -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-elasticsearch-dockercfg-4q8s3"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:10Z", 
                    "name": "aggregated-logging-elasticsearch", 
                    "namespace": "logging", 
                    "resourceVersion": "1565", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-elasticsearch", 
                    "uid": "94f5cf46-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-elasticsearch-token-pzv44"
                    }, 
                    {
                        "name": "aggregated-logging-elasticsearch-dockercfg-4q8s3"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:57
changed: [openshift] => {
    "changed": true, 
    "checksum": "e5015364391ac609da8655a9a1224131599a5cea", 
    "dest": "/tmp/openshift-logging-ansible-3uEmM9/rolebinding-reader.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "446fb96447527f48f97e69bb41bad7be", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 135, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026542.24-53140031477355/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Create rolebinding-reader role] ********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:61
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get clusterrole rolebinding-reader -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "ClusterRole", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:11Z", 
                    "name": "rolebinding-reader", 
                    "resourceVersion": "122", 
                    "selfLink": "/oapi/v1/clusterroles/rolebinding-reader", 
                    "uid": "95bf2a24-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "rules": [
                    {
                        "apiGroups": [
                            ""
                        ], 
                        "attributeRestrictions": null, 
                        "resources": [
                            "clusterrolebindings"
                        ], 
                        "verbs": [
                            "get"
                        ]
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set rolebinding-reader permissions for ES] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:72
ok: [openshift] => {
    "changed": false, 
    "present": "present"
}

TASK [openshift_logging_elasticsearch : Generate logging-elasticsearch-view-role] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "d752c09323565f80ed14fa806d42284f0c5aef2a", 
    "dest": "/tmp/openshift-logging-ansible-3uEmM9/logging-elasticsearch-view-role.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "8299dca2fb036c06ba7c4f620680e0f6", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 183, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026544.18-210774265388962/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Set logging-elasticsearch-view-role role] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:94
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get rolebinding logging-elasticsearch-view-role -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "groupNames": null, 
                "kind": "RoleBinding", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:13Z", 
                    "name": "logging-elasticsearch-view-role", 
                    "namespace": "logging", 
                    "resourceVersion": "1571", 
                    "selfLink": "/oapi/v1/namespaces/logging/rolebindings/logging-elasticsearch-view-role", 
                    "uid": "97031943-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "roleRef": {
                    "name": "view"
                }, 
                "subjects": [
                    {
                        "kind": "ServiceAccount", 
                        "name": "aggregated-logging-elasticsearch", 
                        "namespace": "logging"
                    }
                ], 
                "userNames": [
                    "system:serviceaccount:logging:aggregated-logging-elasticsearch"
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : template] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:105
ok: [openshift] => {
    "changed": false, 
    "checksum": "f91458d5dad42c496e2081ef872777a6f6eb9ff9", 
    "dest": "/tmp/openshift-logging-ansible-3uEmM9/elasticsearch-logging.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "e4be7c33c1927bbdd8c909bfbe3d9f0b", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2171, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026545.79-142797329471506/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : template] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:111
ok: [openshift] => {
    "changed": false, 
    "checksum": "6d4f976f6e77a6e0c8dca7e01fb5bedb68678b1d", 
    "dest": "/tmp/openshift-logging-ansible-3uEmM9/elasticsearch.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "75abfd3a190832e593a8e5e7c5695e8e", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2454, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026546.11-161043132704242/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:121
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : copy] **********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:127
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES configmap] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:133
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-elasticsearch-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "elasticsearch.yml": "cluster:\n  name: ${CLUSTER_NAME}\n\nscript:\n  inline: on\n  indexed: on\n\nindex:\n  number_of_shards: 1\n  number_of_replicas: 0\n  unassigned.node_left.delayed_timeout: 2m\n  translog:\n    flush_threshold_size: 256mb\n    flush_threshold_period: 5m\n\nnode:\n  master: ${IS_MASTER}\n  data: ${HAS_DATA}\n\nnetwork:\n  host: 0.0.0.0\n\ncloud:\n  kubernetes:\n    service: ${SERVICE_DNS}\n    namespace: ${NAMESPACE}\n\ndiscovery:\n  type: kubernetes\n  zen.ping.multicast.enabled: false\n  zen.minimum_master_nodes: ${NODE_QUORUM}\n\ngateway:\n  recover_after_nodes: ${NODE_QUORUM}\n  expected_nodes: ${RECOVER_EXPECTED_NODES}\n  recover_after_time: ${RECOVER_AFTER_TIME}\n\nio.fabric8.elasticsearch.authentication.users: [\"system.logging.kibana\", \"system.logging.fluentd\", \"system.logging.curator\", \"system.admin\"]\nio.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\nio.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\nio.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json\n\nopenshift.config:\n  use_common_data_model: true\n  project_index_prefix: \"project\"\n  time_field_name: \"@timestamp\"\n\nopenshift.searchguard:\n  keystore.path: /etc/elasticsearch/secret/admin.jks\n  truststore.path: /etc/elasticsearch/secret/searchguard.truststore\n\nopenshift.operations.allow_cluster_reader: false\n\npath:\n  data: /elasticsearch/persistent/${CLUSTER_NAME}/data\n  logs: /elasticsearch/${CLUSTER_NAME}/logs\n  work: /elasticsearch/${CLUSTER_NAME}/work\n  scripts: /elasticsearch/${CLUSTER_NAME}/scripts\n\nsearchguard:\n  authcz.admin_dn:\n  - CN=system.admin,OU=OpenShift,O=Logging\n  config_index_name: \".searchguard.${HOSTNAME}\"\n  ssl:\n    transport:\n      enabled: true\n      enforce_hostname_verification: false\n      keystore_type: JKS\n      keystore_filepath: /etc/elasticsearch/secret/searchguard.key\n      keystore_password: kspass\n      truststore_type: JKS\n      truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore\n      truststore_password: tspass\n    http:\n      enabled: true\n      keystore_type: JKS\n      keystore_filepath: /etc/elasticsearch/secret/key\n      keystore_password: kspass\n      clientauth_mode: OPTIONAL\n      truststore_type: JKS\n      truststore_filepath: /etc/elasticsearch/secret/truststore\n      truststore_password: tspass\n", 
                    "logging.yml": "# you can override this using by setting a system property, for example -Des.logger.level=DEBUG\nes.logger.level: INFO\nrootLogger: ${es.logger.level}, console, file\nlogger:\n  # log action execution errors for easier debugging\n  action: WARN\n  # reduce the logging for aws, too much is logged under the default INFO\n  com.amazonaws: WARN\n  io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}\n  io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}\n\n  # gateway\n  #gateway: DEBUG\n  #index.gateway: DEBUG\n\n  # peer shard recovery\n  #indices.recovery: DEBUG\n\n  # discovery\n  #discovery: TRACE\n\n  index.search.slowlog: TRACE, index_search_slow_log_file\n  index.indexing.slowlog: TRACE, index_indexing_slow_log_file\n\n  # search-guard\n  com.floragunn.searchguard: WARN\n\nadditivity:\n  index.search.slowlog: false\n  index.indexing.slowlog: false\n\nappender:\n  console:\n    type: console\n    layout:\n      type: consolePattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.\n  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html\n  #file:\n    #type: extrasRollingFile\n    #file: ${path.logs}/${cluster.name}.log\n    #rollingPolicy: timeBased\n    #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz\n    #layout:\n      #type: pattern\n      #conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  index_search_slow_log_file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}_index_search_slowlog.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n\n  index_indexing_slow_log_file:\n    type: dailyRollingFile\n    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log\n    datePattern: \"'.'yyyy-MM-dd\"\n    layout:\n      type: pattern\n      conversionPattern: \"[%d{ISO8601}][%-5p][%-25c] %m%n\"\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:27Z", 
                    "name": "logging-elasticsearch-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1623", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-elasticsearch-ops", 
                    "uid": "9f2f37b3-4d32-11e7-ae30-0e910886c5dc"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set ES secret] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:144
ok: [openshift] => {
    "changed": false, 
    "results": {
        "apiVersion": "v1", 
        "data": {
            "admin-ca": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
            "admin-cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURQRENDQWlTZ0F3SUJBZ0lCQlRBTkJna3Foa2lHOXcwQkFRVUZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFMU1Wb1hEVEU1TURZd09URTJOREUxTVZvdwpQVEVRTUE0R0ExVUVDZ3dIVEc5bloybHVaekVTTUJBR0ExVUVDd3dKVDNCbGJsTm9hV1owTVJVd0V3WURWUVFECkRBeHplWE4wWlcwdVlXUnRhVzR3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRREEKMkM3MDdBL0szNXBBU1BPMTNMOGg5cWVUOUhqVFR1OXkyZFNDYXdqQU81QitFcDhiandHUTNmS0pkc01xRDVzYwp6aDdYZHlzdml6UytXUk1mSzFBYnM3a2pYd1RHTzJ3QVBJZHJ1SlpFWnFaVFd2eTJhMyt6K0lIT05HWDdEQ3FuClBuVFlldVVyVVlVZ1pycVdPVnl2L3poUmlHb3JBM3Z3OUZKN0lSMGJEaGlZWXR1TkVkWFpjVlY5VGptWmpHa24KeUZTaXlsUk9EWWJaZlFTZ1RCcGhVMmJ1WGhhQVdXUTBzbGViMmhHTWxsWEZVTXVMbmZYK3M1ZW9sNFl3QUxXUgo5U2ZJRXp0NDRiaFhPYWlWNm80a0hKSno1OXZuZlRQc3JncVVzR0htaUxnVnRtVHpLTVJZWklJOWt1KzVhRDdoCkVwUGh5emJMM2ZSZk1paGN0S045QWdNQkFBR2paakJrTUE0R0ExVWREd0VCL3dRRUF3SUZvREFKQmdOVkhSTUUKQWpBQU1CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFkQmdOVkhRNEVGZ1FVcFhjYgpqdHdTV1A1YVBJcVVhb2JITWFDRzE0SXdDUVlEVlIwakJBSXdBREFOQmdrcWhraUc5dzBCQVFVRkFBT0NBUUVBCmgxZFphay9KSTdCbk1zZmd3N1V5NEg3eGRwMExKVkdiNDBCUGNPYzlWKzBZMnNCYTRYVWRtTThKSXNwbWhHVjkKalRxaEdNNlBqY3JmNFVJMGw1SmxBdGY3cjRmd1dPNWJ4U0MyVEw5MGdUc0lsMlkwek1yY2JTaWVMa2pNeXBIdAo4NG5DUklraFRVN3BMRGlucHdMeW56Z2lGQ1VWM3MxNi9yVm1KRFlQSU80VzZsVGNsNmxGQVhYYW50V084VXhUCjdabUNMSUxMbTV1MERZbU5acndQYWVLL3ZWbDV2Q2Z1amRhcXNSQmQ4NCtRRDBNZHJ1Z3JlemtISWJoNmxUR28KR0Q4NFBLVm05aHpOYVVpVEtLZHVZSkxFMCthTHkyMFJURndRb1dkR1NwVnZRVmRZVDduWlhhZ1hpbW5ISlE2WQptVzVsWFFTdVptSmtzbzNrdndRN0tnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
            "admin-key": "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRREEyQzcwN0EvSzM1cEEKU1BPMTNMOGg5cWVUOUhqVFR1OXkyZFNDYXdqQU81QitFcDhiandHUTNmS0pkc01xRDVzY3poN1hkeXN2aXpTKwpXUk1mSzFBYnM3a2pYd1RHTzJ3QVBJZHJ1SlpFWnFaVFd2eTJhMyt6K0lIT05HWDdEQ3FuUG5UWWV1VXJVWVVnClpycVdPVnl2L3poUmlHb3JBM3Z3OUZKN0lSMGJEaGlZWXR1TkVkWFpjVlY5VGptWmpHa255RlNpeWxST0RZYloKZlFTZ1RCcGhVMmJ1WGhhQVdXUTBzbGViMmhHTWxsWEZVTXVMbmZYK3M1ZW9sNFl3QUxXUjlTZklFenQ0NGJoWApPYWlWNm80a0hKSno1OXZuZlRQc3JncVVzR0htaUxnVnRtVHpLTVJZWklJOWt1KzVhRDdoRXBQaHl6YkwzZlJmCk1paGN0S045QWdNQkFBRUNnZ0VBSnVFdkRtZ3RndXh6Ny9RRlZxd3J5TkF1MXhDWjc1aGhoRmxRay82NlkrOWUKWVZ5bHdXdWltQzkzUDZtM0RWcmFIZ09jaWpwWGRaMXVROVkzbWJna3pZRDltWmtGb0dYSWozK2ttZDQ4Ull3ZwppbnUwcVo2Z3Y0c2h1STR0NVdCOWxQeHRtRnVtcW9Zai9mVllOd05Yb1pBbUxhSUFMYTNsSmg1TUp0RDFTa2ZDCm1lL3kyaGEydTY1Q3d6NVQ0SUlhU2JrUkF1dkRZbmt3Q21qWHF2SGVDc1lsdWhCbldlNS9OYkRlWWRaMGpDaW8KMVhBMlNGNmlJZjZKU1g1Z09tZy90b09uMFQ5MzdTdm1nSHp2VzUrUjQ0dEFmeWg0SWlkR2NpV3I0bzlORmZScApabzB2Q0ZCaG9iQWpDK3hzLzFiMzFhWC9TTzdSNmYwNzU2RXRnS2c5alFLQmdRRG5JNGlYM1o4L2kzWmczVXc5CjdhaVU5eDE4aVNXb1ZQdURPSDBxV0oyMVNmK3dSc09ZTXhOY0t0ZFk5TUZ2dzRxaWhyNlMvQjhYOHZLLzZVcDUKUnE3aTVqV1Izby8vK2ZhWm5oMzJ4eDMrOHZiM3huVnBveTZjZFRmbUF6UG5hdmRSdUt5YU9GWi9aTHhvWVF2NApUY3RjVWVwVzU2V1JON3V4NE1pNU8rSzR3d0tCZ1FEVmxqU1lGTjB3NklpZHJpWlBBWUlDV0NwenZSZ0xDbkJEClRYQWI3UmZwSGt4Y1dDbitzVW9kNDhOUk52VXJKQ1JzTWVZdGQ0TFpUbWpDVzFwRS9JZTlwMXpPWDFOYWo0YXEKN25CT3U3eFFNNzJLZ3MyOEszcmFBdGVLSlczV2dpQmNMRWVnQ1kyQ1Q0UnY0bmRWSHlrS0VtelRsRXFZL1JxYgoxWHNaOWFOdXZ3S0JnUUNya2dYSGJOdlRxR3NEdGVEdEt5ZHpCQWVnTkpGK1pTNlpGZXBhNzVLSlNsZUFldGFmCmVISkxmUVdDNDVOZXFkQ1RtZCtHYloyTEFtek1wWmdCYzhWSGFjaWo4WWdFWTRCNFFSVk81RGpRMFhzZkt2Sm0KcXBsankrTGxXc1ZCWjNZVDlEZ3k5ZFRIRlhpQkptQmpQU1BSOElBNWFCUFBPb2Fab0JoKzN3UUpUd0tCZ0JMNwoyOGJjcStCSitzS1JGUVNXckxpS241TlQ3a3drN0Y5L0JZVGw2T2NtYzBFTjhRV2JPbXczcjBlTHEvVGVySUlWCldWRVhPMEVYVUFaZVdrS2hjMnN1OGZmQnowWFFWZmJoWklnSzlhem5QR1NRUmJzQTNsVjhSUzRaTDQ4T1Y5UDUKZDVCSTZSUDQvMXQyZEliVFV6RjYrOE0rZmZibElEVlRuekpxZTlFL0FvR0FRc09PZGFpTS9uMTY2dTZVeDg2WQprMVd6QW1pQUtaQ0s5aVBrdDVHQm5xYUd4MWk4aXJLWDJCYUF5cjJlMTd4dnJuUVlybkQ2WEVZYTkvc0c1SlJ2CjFsS21hcU9SQzRUdmdyeXFGR1ZNT2pzM1JSUXNGeWNpU1M2T0ZTWVVnd3FqUzMrTlM1bnQxWG1sdjROWmJqTHQKTXlBakNZVXgyd1Z2QVlWQmJoNm14dEE9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K", 
            "admin.jks": "/u3+7QAAAAIAAAACAAAAAQAMc3lzdGVtLmFkbWluAAABXI278/EAAAUDMIIE/zAOBgorBgEEASoCEQEBBQAEggTrp/Mbn1oE3GAxw1DzyUsMKPbuId2gz+8RYqzP+xrNaAqS8IwW3kdBIfxDMKN/gP27S0F9Nfq0k9nAt7GnJAX3GGcOEy0AhineW/VP2Vcd7sAjt1LIjhCspWpuCjx5kSZRigVsu42OdWpcGca7M9KZh26L/Aud35qMmWj3shMgCXiJicnGoyGkmTl5g8AR5yoqX4Nt+1NtDH4D9lbZh++Q7kyuYV3IB2p3TBchwEazYHngKU3CaQinR6fsJw1segyCL4mI6irsjjkfdWl03LD+2PcIAvJbT6Onb2CkX69OrjsM/pcE8J2fZIDdcQbQgY7y89r+hRO+pvQU08FRg1ev5Y0Cxc8Fi3PFKr/KnnJz7zpbDlRG3jsjoZVtzKPzttKrEEYhQNRLoDEtMt1DvTT8l/Ueq7tXXV3Jw2mt/Z2XTCgho0OmYl1uGIpPjMw2prv763b9n6k0E0NiaglzP99nedfxQNpABllzj5zcYDoqHilPJjm6dTH9lxcnymXKrKIUCHiEXMAr41sBhqhTMT7hIeU8L1MzgVVarXtu0cUJR++cToWGESKL0sLKKvuRdZykr/ab/7DTKOFWwfnnC2GuTLyj2lrg+DJ3rAVYVKRTYoCepGXk6MxPWn1AwiICd5JGmmWa6ER7WcrZCBFtc0mk107lT+G+WEXxXLeQATnsgMVhGLtxELBAHd5FrZfz/21NL6xrcHPBHSX7ShzijGzq6/VeG8rBajDT/UTihyPhaHARTN+mTXbNV+LxODd99/BrDnvoYWLkBVNHpeEcWIrhqzWBLaiPjRE+yjcSapNxE09+Cehrr+i5+tmrAuJnfLOwTFRxE21kY1EynwQ8f6rLFRZ8LN9Sg7fxh8Q46SVElElDy/W2kks20DRCPMx33+dPQIWJiK5IuLbwVZrWGy+4sX/Rx4rp8G/OpygndCi78tM1cQ+xuiKN7v3xW46tJo6fSF+0X+d3kgZljnIWGado53xHd152gkPRUMjKbOriIKp3iyHkVpiZAw/po1BjO9NAie9L3dMflEQd2B03WdGRUrhLjHD7jcNiWBjisSP7qzOMpvnzSK3GCd7BWflLHHZ2uyzss5SKi5TjM9I67UQxJm180LwVI1CRluSFRpSdxflnLwqV9DdiCiuXNb7zigOCIW4GbkWs/pyTrWUR2fLvVg/idc1YBuM0fTLVkxA++zb8nUOGa+R/hAH9/I6QXxoGPw7crH1FK1CUtzv7lX/ULbRuq4gPrUcUo55/bbx5YAy4sq0+kW1lw7k9mTVmRKppapI0KwMi2RhCF8eB2Lk2L2aH3lsWov9H/PnDUnWn0PZwN830rEPIZYLsW7oid0CKuZWx/aYuo/VVMMI1T/kpoNDAgJs4M+8xFqJCB4QhKaloKqqixwm0VhBFxV59FduAAQKIGDNbX8xxquYfum/SAG6SjO4qRDi14JTLiuY+kBtUx7a4wdBkSi+GqHt0vH6NmQBMD405xsfc1aauBkGKnB6wzYW5vGw//CIjKANV5jzGZkO/btAsF4hCGXfaXlThHSDn2bAqvJqbZ/7JZhq2WCbr2lZnSofQTD5ImqmOeFrImfMRziXwHTE1KYac9nVTyMOegYT5IT2ri+VM2YbET2KNOBG5MZ/Z0d185h4+1kKWAb1Z/tdNbjQ8/Aj6RDisSUm4/fjziIXcSpQAAAACAAVYLjUwOQAAA0AwggM8MIICJKADAgECAgEGMA0GCSqGSIb3DQEBBQUAMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwHhcNMTcwNjA5MTY0MjAzWhcNMTkwNjA5MTY0MjAzWjA9MRAwDgYDVQQKEwdMb2dnaW5nMRIwEAYDVQQLEwlPcGVuU2hpZnQxFTATBgNVBAMTDHN5c3RlbS5hZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIjEphtOk+AKcZlpeksI6KpxUa4Of9BCvgeA1kjZCdSHO4F1QuMH8lnzyDC9/LPpbi09WbtQviRz7lfrqP4tDf5KJkaQN9gAnmRW54Lo2jsI79FVp6J5Y10F+qwnkL6yU4NGA1rwqeJ3N1OHKF/1VmyCfPAXsK9UAfaDQ3yzC9ien/NdQfzhR/KwUkfGcXPxQlN+aloi10ZO8tv8Dvwr34LloDrYnIyPFysssYDIipWBmkRA2xvdcJ2zQYzaEdHGjlMFiK1h4G7Z5u5ySWrPkSQLPyuYgBmijkygYJ9YvaeX3k0lUPQpWbgOGMnhpm393dkl4O6Sez+HmXIPrX0e6RMCAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgWgMAkGA1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQ011Ed2U0znD/SOYtTVJw9HdXzrDAJBgNVHSMEAjAAMA0GCSqGSIb3DQEBBQUAA4IBAQCVlZYNHPiP8EOy/+F/afL37nxD+9Epvsi9wkOoAfQ9W0efjoXjHt2TQY6RUC37Hq79zVf/UBncbl06WbuWP8o8pWMda4zTxnLHfLM3UZTvFL85lO94qU2+bqxJOEDgnszTDOVVc851+q6+nRJkswjwN8BxQz1y7l4N9KLNK9LiWMlwnvAYP8xFNekZHQt9bWZuMxd3q8FR518m6gFbA6m3FRSpxJ6regDPhL9iHqKf/BdikrGWfTznp1rNd63WCG7z66pA/zCWBmO+Vz3d7ySLZKldS/kRz3Xs+8Z87WJg4oLPZjs8L6AGrd7kj4fUqMraAaRG8xLCGqyWvm+XqdEDAAVYLjUwOQAAAt4wggLaMIIBwqADAgECAgEBMA0GCSqGSIb3DQEBCwUAMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwHhcNMTcwNjA5MTY0MTQzWhcNMjIwNjA4MTY0MTQ0WjAeMRwwGgYDVQQDExNsb2dnaW5nLXNpZ25lci10ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqLZAKRwPLcYLL5Ky1q5l7N+rTI5HbArXmE0CINKkXvUInJb2Buw7uCeCj7ruW9zxlJc7NckGYCQQcVMIjCwdhB/lyN+YXnCLmSl5QZ97RlB1G3Ab14F734r5WuMHOEwtkDlxxdEs087M9LUb1qvB2wnVg4B1btbuOzaqlDAMrapA36XAjy7P91fUpmurdiBTIYRuLF3gJjAg5Q968Atk+SSVITq9qJQEUXmdIFiuyB9girwor8VNDD4/MfyK59OXpXp3J2QheZlqcVK1rXd5kXczdinA0ucwWTzrz9PkDEn9e0/reP0qDn+iXAsvzYAYSPTNfUz07P5w28I2gG+M+QIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zANBgkqh
kiG9w0BAQsFAAOCAQEAlj0O2VkuBsJoAvSjrUhaGJnjpMbqroVxccDBs6XaEofaI/Gn286zGYZ9t79WsE/ouDa5K8t4gfC4+PTncL+0Kh09IrPYGmDA1jmO02FcTAgW2K5odVMh92BiHvx8yYbSupEjQ+Ycg6ZaK7j3aAFGjGJcx90bqTbGrIJLI/G4PtJJ4xc68liQDSwHZSBxgGbrUuDDVurKHgX9g25J4UORpnUI1J/kLHb8P0uUBBwkpJG7ixkKn9RluyM9yl3+GQD0OmIAjawC7Z5LUMvC3YnVjIWGS6lZHakZt+1+xgc6SBy/PPvVFjXHoKGtc4e/3S5wylHGhDdvtrxPHxdeOooqzAAAAAIABnNpZy1jYQAAAVyNu/NRAAVYLjUwOQAAAt4wggLaMIIBwqADAgECAgEBMA0GCSqGSIb3DQEBCwUAMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwHhcNMTcwNjA5MTY0MTQzWhcNMjIwNjA4MTY0MTQ0WjAeMRwwGgYDVQQDExNsb2dnaW5nLXNpZ25lci10ZXN0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqLZAKRwPLcYLL5Ky1q5l7N+rTI5HbArXmE0CINKkXvUInJb2Buw7uCeCj7ruW9zxlJc7NckGYCQQcVMIjCwdhB/lyN+YXnCLmSl5QZ97RlB1G3Ab14F734r5WuMHOEwtkDlxxdEs087M9LUb1qvB2wnVg4B1btbuOzaqlDAMrapA36XAjy7P91fUpmurdiBTIYRuLF3gJjAg5Q968Atk+SSVITq9qJQEUXmdIFiuyB9girwor8VNDD4/MfyK59OXpXp3J2QheZlqcVK1rXd5kXczdinA0ucwWTzrz9PkDEn9e0/reP0qDn+iXAsvzYAYSPTNfUz07P5w28I2gG+M+QIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAlj0O2VkuBsJoAvSjrUhaGJnjpMbqroVxccDBs6XaEofaI/Gn286zGYZ9t79WsE/ouDa5K8t4gfC4+PTncL+0Kh09IrPYGmDA1jmO02FcTAgW2K5odVMh92BiHvx8yYbSupEjQ+Ycg6ZaK7j3aAFGjGJcx90bqTbGrIJLI/G4PtJJ4xc68liQDSwHZSBxgGbrUuDDVurKHgX9g25J4UORpnUI1J/kLHb8P0uUBBwkpJG7ixkKn9RluyM9yl3+GQD0OmIAjawC7Z5LUMvC3YnVjIWGS6lZHakZt+1+xgc6SBy/PPvVFjXHoKGtc4e/3S5wylHGhDdvtrxPHxdeOooqzNy9gVgFNP+CLCcu/TelMtS/OlNn", 
            "key": "/u3+7QAAAAIAAAACAAAAAQAKbG9nZ2luZy1lcwAAAVyNu/78AAAFATCCBP0wDgYKKwYBBAEqAhEBAQUABIIE6dQJCeHWrjqO0eJFwZOCTHBxA811UL5mVHhlOt8C49DFMijKEp/xVc4sVHMx0L4tGezHhE8Vz0IdKDZizBl8Eofzb+XWwpdApXB87R9CdGhqwtqLv7gwDUrHz599IcEIL7HRMkpM+Ikz8GqOGRm0D4maAxKstZvM5tzqyG/eGLXttoUkURVgPuFwBxAbxEg7jI203crwX7zCF6DFFqIL/8ms0SvnlCiZEbAIw94cKC/qKWRogG8q/dZm7Wy77EfdT75TDvKf7AWNloPbGX3whlNS7HBXrAo9lJ0KotHqiRG1ao3ngCpUSrDiuZthoBRgsOQX9cVMDprYiZhLb5fXSptWoO0nbVG+3H08c6mBQ8PB62as1sXa872igv/wEWh+8goz+SUSzoT8ZM0bpH8EHaJ0TdxWJTGa6zbWWwtm6zaTodBwp6JPBv0cQNSfdGX15p2Oz0uRTSRBQS6RreVtpD4BgrxtS4m1yF21Ox5iKcnG0zYa39hucaCJfbFqgtn2VmuhYN0Oxc0E8sTNTY7Fq4/F0a1n8U1FjdXUBNhqGMoHmDbTjitOqy99u4D2HxIlXrDcXB09qeXi5i7YwYoJ7IejKFCWMb2NFkCl6F3RHbGYyO3NX9BYDwGXFblTZcjIUM+563x49ag17nj1GnrlNQmahT9R9ixAUfit4XfvvF4ye5nHCynaVNEzlH8yGUhoiE6AxeSGtLQHqKHu5QjJX1Ge18ojOwDHHuUECGZ787s9G7Oy+Gf+WSInGxHAgsVYSizyaCSgUGjGYzsvwnsfrRgdSuZlvJ0hjyMI8PNRooALRAc7osTsPe/Ec2TBzlbat2AmDieCRgaC2GbuaA61ZRJWi33FU9QZWeolyBIldcaY7HtPB8CelSarpFi5VlpV0r4jToyXdPAqRPRBG3UG8NUXycGlCNkQY6rbPQdCCCTFh0S+tkqiEtBMgUcahld7romUctdbIrw/VcjOxSLfVDaAfTuCKv/h44BkBhkXfcgc7gxFmBVoHvvuCtkFgApK/W4mf7yLEr8Ique3Y7nZY5Gah/bfvcP4pZTFusBsO0atLWTlcIWOiZmRmOXNeovOgDlFJlliq1MdfkkVeu4gzXIks5ZaOVkMGDhxr6awMk0Fp/Vtp1OYMcq/NTC+7xRUvw6F3NL1H1GR/qFMs1c1O97SQmknHE2odr9QiLz+PWRPhn0czwoB3s4B4ydrNpcN9AHw/JG/1X59L0sViVHflqEsSGo2ltvekKbAwX7mpcSgGVhvqc71BuCoXlFMOZVROL7//X0dW7gE/a12n6VBT/eQuph7xFC7IDIccvmEuKAASXk/tEi61XlNsGcum8JMho78+UOIJ31wP4ocpLfMbLAsSABJpuznXlHPKHBhLXox9f+X9eeQdn7pSkfs/bEEPvNMkItAPdY9oD6lOw9efAOlfG3obawcz6ep0mxLKheXJEq8LaE9jAbFW3q5EP26mPbr6krKND7R3Fc3hI+0P0qQgDMok5Nmm/lCbUkGqkElXDV7h7/87A+4bj7jmCdnkOOBKNENYiZvKGVNlw8rQsVcDfbZE75xXhfchE+sbEIBQODEymf0BCgZO5uEr8J//rt/d2qiuU4Tqb6HCpM9mGBFDvoXh76iXipdzy2xzN9t5KLVP3purmJJBG2r7a5ORvJWaClQJUGg5gAAAAIABVguNTA5AAAEXDCCBFgwggNAoAMCAQICAQgwDQYJKoZIhvcNAQEFBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQyMDZaFw0xOTA2MDkxNjQyMDZaMDsxEDAOBgNVBAoTB0xvZ2dpbmcxEjAQBgNVBAsTCU9wZW5TaGlmdDETMBEGA1UEAxMKbG9nZ2luZy1lczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJ2ucuOAP5lfo2t4jJDHSSiZervrDJqthHJuoqUUArdDC2OR5pdCbg7RvAMd5GYcBsHVscRxBTf0occzc3Fssv8dT1fxz31FxpDnCyJ8kzV0grgDtROaIFD4Z9VEebYF3SV7IBHwiJauP8WqwA6kACVrceOSYwpafDDnIpbLbukVJw6fGDByah0tb3zD/nejgSRYSds1svju8WjTK8XuQzVpGisRbcz7LApbkieSGU1p7VjuHJN9NTROIMHHzKL5RlNUTw4WPHiMdGCHuwVEErBQ8EVGIN9fb/ZkfwsLAKeK2hz4M5uLdBFWSfxbdK9u6YHDc6fG9BSc3r/AuSB6sSkCAwEAAaOCAYIwggF+MA4GA1UdDwEB/wQEAwIFoDAJBgNVHRMEAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUlZDo64ZiTEm7ov0v8Z1Q2TcczqkwCQYDVR0jBAIwADCCARYGA1UdEQSCAQ0wggEJgglsb2NhbGhvc3SHBH8AAAGCCmxvZ2dpbmctZXOCJGxvZ2dpbmctZXMubG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbIISbG9nZ2luZy1lcy1jbHVzdGVygixsb2dnaW5nLWVzLWNsdXN0ZXIubG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbIIObG9nZ2luZy1lcy1vcHOCKGxvZ2dpbmctZXMtb3BzLmxvZ2dpbmcuc3ZjLmNsdXN0ZXIubG9jYWyCFmxvZ2dpbmctZXMtb3BzLWNsdXN0ZXKCMGxvZ2dpbmctZXMtb3BzLWNsdXN0ZXIubG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbDANBgkqhkiG9w0BAQUFAAOCAQEANb6UOT8j8QtTcQKOMhIhsmxfEh6Rnd5MDG+2I324OdjubEvfKkjpoCAdE+uPAGtT4ir/vyPyVmEb6JNo4US20tOCBrxejxJTCURpIl6anWKW8z5atlrHoKBQFMH8OzmGTGle3odoepwr/J4pW6DyxwLZGqXUBqOB9UDFeCTd7UeIWY5n6pss+qDM16oGkk/IFRkUDIrJChmVHO4JScY+1RQ4o/36gUeFV0K2wmUPwu4lO3iTfBw2kDNsDrqDRfIAIdR8+Q/Ry3w1w3Dqm+/wNAAg8DWKhnE/VN/HOrXPsHd6uRMDYtm3+QNqfUA5syeJe54bkby14eBaXtTmhRr2JgAFWC41MDkAAALeMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dnaW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFowHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiD
SpF71CJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQdRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2qQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgfYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9DtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2uSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqRI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lgw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9DpiAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpRxoQ3b7a8Tx8XXjqKKswAAAACAAZzaWctY2EAAAFcjbv+WQAFWC41MDkAAALeMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dnaW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFowHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiDSpF71CJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQdRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2qQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgfYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9DtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2uSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqRI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lgw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9DpiAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpRxoQ3b7a8Tx8XXjqKKsysFqTC978uaK8aLKdwou0+mtSsuA==", 
            "searchguard.key": "/u3+7QAAAAIAAAACAAAAAQANZWxhc3RpY3NlYXJjaAAAAVyNu/pkAAAFATCCBP0wDgYKKwYBBAEqAhEBAQUABIIE6QbbNunexfbFcw1v/IEpW8DeH4bWjaqf01nb5nbwv0f99xtOuTaWKBzbeNGxw3qkp1W4+2PcpkFA49D3dy0T7cpc1L28xkIotKHMi1q91y3Ii3qYdCah/kjZrrgtPvBU5QCyUUDg1qkYQdRkO6i9vYpgsBcS6VwroBC3Y+NkcFLpf95YNiqqUH811VjxCOrrNJU4teBQ9M/ZO967dFk71WwvYOtOjKtpNySNPR7eNJb/PKvErQ2jwMVflH6mDhIYi0FOAl25WStMH45I+NlpmkRg6ZY1F8N49NaizwBvMiIjZ4L025O2cwFLeMh8R3kgbIgFtLBsZb5odTMV4oq+At88ZDgQLqi+pF2px5qKHF7KtKkeN1BD+ERebu5/7TRP+dNyu+pq0WCusthueonylwfHouC36pwZiGtveG3TW9cNIMirKhv95Zd7J+IfjukYUirgKnvyA9Da75hdl6LxP0T7fcXQ37MLL4bmJ1OeGWngfVgBpGKVa1sLOpCSlglM8UaLHaCHI/V40iP3sgOFyrZKHPkXKHiQGbiG3CbWWPPOgucibiTXGdmBG+QkwCHFwqw1qHnNb2b22kjFIG0I9d1sohBM//5drl8XwQSEx1KEzJRL6i8QLVhGALTB3fBlVb7PFyKIRVZexn4nEsQDViKPj6jaIbVckFik1CizzIsYjos6IhVkxKzxladHM1LxwpPanSVT5AAC2xuDFAt4zysydT93dh60bCeZbV3nN747mqGLu/YeyXoC5bmLxQ4+6TJcAKxa5JKss4x3wOcpIkZmlkrKT5Pstav2rfPt7/8CTeoEB+LCxVFmZf6uVVpD9CirXly9e1ssRJKQs7cXpuRrV+Wx2ePg1trroRCSl07MQ2LV7y2jobJ/opbV2TW5ibpfZA/B/1pRhutxTwCO8iqgixoBql/M0B2qaTkxOKcp8Ml7wWB1RyqYne8kcJyb2owfVuz2TDOnbUk51FCm8TnGnsJPnAEMvUooaCki9q6F2qi8drgIjis3oNFmqq1ZZ/yLX5NxiJTX9Idq+s+mkmXYMBgqQK9fVhRkAdraQkbiQatOf7qish5wj2T5UGVoeMuiXDuD+ypn4ogW16paFgE7JWuMoxUL7Z3lqF2qWaNS9IJGW+Fa0TzDslqLRxDuXctTnnazP32d7skVrqBhvvj3ONSIEwQjM5tc/bUGapau9q17WD79bF3QFu5JTV/zdc3Ugafefy0v6T2K2sitOmDkh7Psdn+D/NYIHd8lRQV9Xm9SQG2xhcDejhacZXWLDgNmLzIFbFEIH9pzyKhJK1Y26IAZbNNvKAOWiwV5ShokSGVLzTCuTkD5VEHXnF9WMOzi8jP98pxSJCNMGyJudH7kxV2mbzt8X/81PjhDyAETZu3l/1BB5GiaMNe/n9mI2XhdP+iamaWHR6PmWGsiC6ALAHTQlhqbk+qvs/B0t67+Okn7gFnBk1WxBMmCbGCuUtOYZqJa/rLN43IkWQdPwXJ4Ap2g89vNjKnucIfEANE458oR1m2SqKN/T8DuxHdRqxJsjbGzVYjKlZp6U5iIlyfqmBcQsL7pU7BFqz4lJhMgzQgmFoUdZV09a4ze0dgqvKWqSm86avK/+AioA9VpAQbuVWsk+rX4zTjwgA40nrktSl5wjfDVsW6divgr5a9xy6a7mgEF5otG9gAAAAIABVguNTA5AAADgjCCA34wggJmoAMCAQICAQcwDQYJKoZIhvcNAQEFBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQyMDVaFw0xOTA2MDkxNjQyMDVaMD4xEDAOBgNVBAoTB0xvZ2dpbmcxEjAQBgNVBAsTCU9wZW5TaGlmdDEWMBQGA1UEAxMNZWxhc3RpY3NlYXJjaDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJz3FAnMW3wSmtlP3m3ZaV25o8TZpniVoyhwb4sq4TIIzcaQdF9VTeHQhvjo5MgGlj/m/XTUsTN0nwoAdveM9dRcimGSYtA9q8y8rLc8ZggeUqOQ++WRiJYK9aEXcAIqZkTUsRrh1qQk1qzwi+tklT8GZsJ27MP2z/dYSqUfjRfAdBhdacE0iKEvQOZdGodwuRSgsw06jFaHmy7O8OTkZ7+gfYGjWMEADDnIz8wEDO8auXw9XVuESDHKT7e0GfutSHus2yXokodfPgfJv0erSb8a76zDgrrBPeozE0cjvw5vteQ8/vyPLcUZslYDIHiwUu9gEZrWQlxIkhRdugmyH48CAwEAAaOBpjCBozAOBgNVHQ8BAf8EBAMCBaAwCQYDVR0TBAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFG+4yiOcGXXTElUB7aMy52+BCx+0MAkGA1UdIwQCMAAwPQYDVR0RBDYwNIIJbG9jYWxob3N0hwR/AAABggpsb2dnaW5nLWVzgg5sb2dnaW5nLWVzLW9wc4gFKgMEBQUwDQYJKoZIhvcNAQEFBQADggEBAIzufh6l075HmNV0uODgHVQLPqJFTZPZdiOagUQbtlFkTWpvfu9CCbA+Ubp5tOD+2BoD/8RwQzqhSZQd0br9ujVvnXyS78T7AjyqHIjq5TSwdyyJ/ChoIqO6eDwEDWy3LI4BRMOf0lF9D3rhoaHcbdltHT2hpF48W+oQVx9gplmeul+PiCm6Hvtg1XGFL1YDkz+KNdm6CaLcfnF9o06XOkotvPwr32EnnNpNh3zm6uviIWPDFiC9FV3h6jeZmm7B4VDgc7gOJScm5c65AFBkhd9RW5HOcQr+CyRD3ABSbXgpbLpeThR6uXS1QRBuZpv/832TbZK+g05dhvXvoyZ0M9IABVguNTA5AAAC3jCCAtowggHCoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQxNDNaFw0yMjA2MDgxNjQxNDRaMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCotkApHA8txgsvkrLWrmXs36tMjkdsCteYTQIg0qRe9QiclvYG7Du4J4KPuu5b3PGUlzs1yQZgJBBxUwiMLB2EH+XI35hecIuZKXlBn3tGUHUbcBvXgXvfivla4wc4TC2QOXHF0SzTzsz0tRvWq8HbCdWDgHVu1u47NqqUMAytqkDfpcCPLs/3V9Sma6t2IFMhhG4sXeAmMCDlD3rwC2T5JJUhOr2olARReZ0gWK7IH2CKvCivxU0MPj8x/Irn05elencnZCF5mWpxUrWtd3mRdzN2KcDS5zBZPOvP0+QMSf17T+t4/SoOf6J
cCy/NgBhI9M19TPTs/nDbwjaAb4z5AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWPQ7ZWS4GwmgC9KOtSFoYmeOkxuquhXFxwMGzpdoSh9oj8afbzrMZhn23v1awT+i4Nrkry3iB8Lj49Odwv7QqHT0is9gaYMDWOY7TYVxMCBbYrmh1UyH3YGIe/HzJhtK6kSND5hyDploruPdoAUaMYlzH3RupNsasgksj8bg+0knjFzryWJANLAdlIHGAZutS4MNW6soeBf2DbknhQ5GmdQjUn+Qsdvw/S5QEHCSkkbuLGQqf1GW7Iz3KXf4ZAPQ6YgCNrALtnktQy8LdidWMhYZLqVkdqRm37X7GBzpIHL88+9UWNcegoa1zh7/dLnDKUcaEN2+2vE8fF146iirMAAAAAgAGc2lnLWNhAAABXI27+cAABVguNTA5AAAC3jCCAtowggHCoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQxNDNaFw0yMjA2MDgxNjQxNDRaMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCotkApHA8txgsvkrLWrmXs36tMjkdsCteYTQIg0qRe9QiclvYG7Du4J4KPuu5b3PGUlzs1yQZgJBBxUwiMLB2EH+XI35hecIuZKXlBn3tGUHUbcBvXgXvfivla4wc4TC2QOXHF0SzTzsz0tRvWq8HbCdWDgHVu1u47NqqUMAytqkDfpcCPLs/3V9Sma6t2IFMhhG4sXeAmMCDlD3rwC2T5JJUhOr2olARReZ0gWK7IH2CKvCivxU0MPj8x/Irn05elencnZCF5mWpxUrWtd3mRdzN2KcDS5zBZPOvP0+QMSf17T+t4/SoOf6JcCy/NgBhI9M19TPTs/nDbwjaAb4z5AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWPQ7ZWS4GwmgC9KOtSFoYmeOkxuquhXFxwMGzpdoSh9oj8afbzrMZhn23v1awT+i4Nrkry3iB8Lj49Odwv7QqHT0is9gaYMDWOY7TYVxMCBbYrmh1UyH3YGIe/HzJhtK6kSND5hyDploruPdoAUaMYlzH3RupNsasgksj8bg+0knjFzryWJANLAdlIHGAZutS4MNW6soeBf2DbknhQ5GmdQjUn+Qsdvw/S5QEHCSkkbuLGQqf1GW7Iz3KXf4ZAPQ6YgCNrALtnktQy8LdidWMhYZLqVkdqRm37X7GBzpIHL88+9UWNcegoa1zh7/dLnDKUcaEN2+2vE8fF146iirMcfkuWHKodBqgTIlA5l2j3U1o9yk=", 
            "searchguard.truststore": "/u3+7QAAAAIAAAABAAAAAgAGc2lnLWNhAAABXI27/5YABVguNTA5AAAC3jCCAtowggHCoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQxNDNaFw0yMjA2MDgxNjQxNDRaMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCotkApHA8txgsvkrLWrmXs36tMjkdsCteYTQIg0qRe9QiclvYG7Du4J4KPuu5b3PGUlzs1yQZgJBBxUwiMLB2EH+XI35hecIuZKXlBn3tGUHUbcBvXgXvfivla4wc4TC2QOXHF0SzTzsz0tRvWq8HbCdWDgHVu1u47NqqUMAytqkDfpcCPLs/3V9Sma6t2IFMhhG4sXeAmMCDlD3rwC2T5JJUhOr2olARReZ0gWK7IH2CKvCivxU0MPj8x/Irn05elencnZCF5mWpxUrWtd3mRdzN2KcDS5zBZPOvP0+QMSf17T+t4/SoOf6JcCy/NgBhI9M19TPTs/nDbwjaAb4z5AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWPQ7ZWS4GwmgC9KOtSFoYmeOkxuquhXFxwMGzpdoSh9oj8afbzrMZhn23v1awT+i4Nrkry3iB8Lj49Odwv7QqHT0is9gaYMDWOY7TYVxMCBbYrmh1UyH3YGIe/HzJhtK6kSND5hyDploruPdoAUaMYlzH3RupNsasgksj8bg+0knjFzryWJANLAdlIHGAZutS4MNW6soeBf2DbknhQ5GmdQjUn+Qsdvw/S5QEHCSkkbuLGQqf1GW7Iz3KXf4ZAPQ6YgCNrALtnktQy8LdidWMhYZLqVkdqRm37X7GBzpIHL88+9UWNcegoa1zh7/dLnDKUcaEN2+2vE8fF146iirMvPIJWUOxiBgM/fdTu6Zs6sIwU98=", 
            "truststore": "/u3+7QAAAAIAAAABAAAAAgAGc2lnLWNhAAABXI27/5YABVguNTA5AAAC3jCCAtowggHCoAMCAQICAQEwDQYJKoZIhvcNAQELBQAwHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDAeFw0xNzA2MDkxNjQxNDNaFw0yMjA2MDgxNjQxNDRaMB4xHDAaBgNVBAMTE2xvZ2dpbmctc2lnbmVyLXRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCotkApHA8txgsvkrLWrmXs36tMjkdsCteYTQIg0qRe9QiclvYG7Du4J4KPuu5b3PGUlzs1yQZgJBBxUwiMLB2EH+XI35hecIuZKXlBn3tGUHUbcBvXgXvfivla4wc4TC2QOXHF0SzTzsz0tRvWq8HbCdWDgHVu1u47NqqUMAytqkDfpcCPLs/3V9Sma6t2IFMhhG4sXeAmMCDlD3rwC2T5JJUhOr2olARReZ0gWK7IH2CKvCivxU0MPj8x/Irn05elencnZCF5mWpxUrWtd3mRdzN2KcDS5zBZPOvP0+QMSf17T+t4/SoOf6JcCy/NgBhI9M19TPTs/nDbwjaAb4z5AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCWPQ7ZWS4GwmgC9KOtSFoYmeOkxuquhXFxwMGzpdoSh9oj8afbzrMZhn23v1awT+i4Nrkry3iB8Lj49Odwv7QqHT0is9gaYMDWOY7TYVxMCBbYrmh1UyH3YGIe/HzJhtK6kSND5hyDploruPdoAUaMYlzH3RupNsasgksj8bg+0knjFzryWJANLAdlIHGAZutS4MNW6soeBf2DbknhQ5GmdQjUn+Qsdvw/S5QEHCSkkbuLGQqf1GW7Iz3KXf4ZAPQ6YgCNrALtnktQy8LdidWMhYZLqVkdqRm37X7GBzpIHL88+9UWNcegoa1zh7/dLnDKUcaEN2+2vE8fF146iirMvPIJWUOxiBgM/fdTu6Zs6sIwU98="
        }, 
        "kind": "Secret", 
        "metadata": {
            "creationTimestamp": null, 
            "name": "logging-elasticsearch"
        }, 
        "type": "Opaque"
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set logging-es-ops-cluster service] ****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:168
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.230.244", 
        "cmd": "/bin/oc get service logging-es-ops-cluster -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:29Z", 
                    "name": "logging-es-ops-cluster", 
                    "namespace": "logging", 
                    "resourceVersion": "1626", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-es-ops-cluster", 
                    "uid": "a0741c02-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.230.244", 
                    "ports": [
                        {
                            "port": 9300, 
                            "protocol": "TCP", 
                            "targetPort": 9300
                        }
                    ], 
                    "selector": {
                        "component": "es-ops", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Set logging-es-ops service] ************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:182
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.217.237", 
        "cmd": "/bin/oc get service logging-es-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:30Z", 
                    "name": "logging-es-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1629", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-es-ops", 
                    "uid": "a143d396-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.217.237", 
                    "ports": [
                        {
                            "port": 9200, 
                            "protocol": "TCP", 
                            "targetPort": "restapi"
                        }
                    ], 
                    "selector": {
                        "component": "es-ops", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Creating ES storage template] **********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:197
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Creating ES storage template] **********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:210
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES storage] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:225
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:237
ok: [openshift] => {
    "ansible_facts": {
        "es_deploy_name": "logging-es-ops-data-master-n5j3wlaj"
    }, 
    "changed": false
}

TASK [openshift_logging_elasticsearch : set_fact] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:241
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_elasticsearch : Set ES dc templates] *******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:246
changed: [openshift] => {
    "changed": true, 
    "checksum": "9486a0fbb74467900f575eac081bc00046fd8546", 
    "dest": "/tmp/openshift-logging-ansible-3uEmM9/templates/logging-es-dc.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "940f63d659c8b8c151d5cdd5cc469d80", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 3179, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026551.42-146903010021539/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_elasticsearch : Set ES dc] *****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:262
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-es-ops-data-master-n5j3wlaj -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:32Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "es-ops", 
                        "deployment": "logging-es-ops-data-master-n5j3wlaj", 
                        "logging-infra": "elasticsearch", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-es-ops-data-master-n5j3wlaj", 
                    "namespace": "logging", 
                    "resourceVersion": "1644", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-es-ops-data-master-n5j3wlaj", 
                    "uid": "a24e0ab1-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "es-ops", 
                        "deployment": "logging-es-ops-data-master-n5j3wlaj", 
                        "logging-infra": "elasticsearch", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "es-ops", 
                                "deployment": "logging-es-ops-data-master-n5j3wlaj", 
                                "logging-infra": "elasticsearch", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-es-ops-data-master-n5j3wlaj"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "NAMESPACE", 
                                            "valueFrom": {
                                                "fieldRef": {
                                                    "apiVersion": "v1", 
                                                    "fieldPath": "metadata.namespace"
                                                }
                                            }
                                        }, 
                                        {
                                            "name": "KUBERNETES_TRUST_CERT", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "SERVICE_DNS", 
                                            "value": "logging-es-ops-cluster"
                                        }, 
                                        {
                                            "name": "CLUSTER_NAME", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "INSTANCE_RAM", 
                                            "value": "8Gi"
                                        }, 
                                        {
                                            "name": "NODE_QUORUM", 
                                            "value": "1"
                                        }, 
                                        {
                                            "name": "RECOVER_EXPECTED_NODES", 
                                            "value": "1"
                                        }, 
                                        {
                                            "name": "RECOVER_AFTER_TIME", 
                                            "value": "5m"
                                        }, 
                                        {
                                            "name": "READINESS_PROBE_TIMEOUT", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "IS_MASTER", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "HAS_DATA", 
                                            "value": "true"
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-elasticsearch:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "elasticsearch", 
                                    "ports": [
                                        {
                                            "containerPort": 9200, 
                                            "name": "restapi", 
                                            "protocol": "TCP"
                                        }, 
                                        {
                                            "containerPort": 9300, 
                                            "name": "cluster", 
                                            "protocol": "TCP"
                                        }
                                    ], 
                                    "readinessProbe": {
                                        "exec": {
                                            "command": [
                                                "/usr/share/elasticsearch/probe/readiness.sh"
                                            ]
                                        }, 
                                        "failureThreshold": 3, 
                                        "initialDelaySeconds": 10, 
                                        "periodSeconds": 5, 
                                        "successThreshold": 1, 
                                        "timeoutSeconds": 30
                                    }, 
                                    "resources": {
                                        "limits": {
                                            "cpu": "1", 
                                            "memory": "8Gi"
                                        }, 
                                        "requests": {
                                            "memory": "512Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/elasticsearch/secret", 
                                            "name": "elasticsearch", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/usr/share/java/elasticsearch/config", 
                                            "name": "elasticsearch-config", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/elasticsearch/persistent", 
                                            "name": "elasticsearch-storage"
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {
                                "supplementalGroups": [
                                    65534
                                ]
                            }, 
                            "serviceAccount": "aggregated-logging-elasticsearch", 
                            "serviceAccountName": "aggregated-logging-elasticsearch", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "elasticsearch", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-elasticsearch"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-elasticsearch"
                                    }, 
                                    "name": "elasticsearch-config"
                                }, 
                                {
                                    "emptyDir": {}, 
                                    "name": "elasticsearch-storage"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:42:32Z", 
                            "lastUpdateTime": "2017-06-09T16:42:32Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:42:32Z", 
                            "lastUpdateTime": "2017-06-09T16:42:32Z", 
                            "message": "replication controller \"logging-es-ops-data-master-n5j3wlaj-1\" is waiting for pod \"logging-es-ops-data-master-n5j3wlaj-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_elasticsearch : Delete temp directory] *****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_elasticsearch/tasks/main.yaml:274
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-3uEmM9", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:151
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml

TASK [openshift_logging_kibana : fail] *****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "kibana_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : fail] *****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Create temp directory for doing work in] ******
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:7
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:01.005160", 
    "end": "2017-06-09 12:42:35.057500", 
    "rc": 0, 
    "start": "2017-06-09 12:42:34.052340"
}

STDOUT:

/tmp/openshift-logging-ansible-8ijN9q

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:12
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-8ijN9q"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Create templates subdirectory] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:16
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-8ijN9q/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_kibana : Create Kibana service account] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:26
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Create Kibana service account] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:34
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-kibana -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-kibana-dockercfg-38337"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:36Z", 
                    "name": "aggregated-logging-kibana", 
                    "namespace": "logging", 
                    "resourceVersion": "1668", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-kibana", 
                    "uid": "a49a6153-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-kibana-dockercfg-38337"
                    }, 
                    {
                        "name": "aggregated-logging-kibana-token-cj4v7"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:42
ok: [openshift] => {
    "ansible_facts": {
        "kibana_component": "kibana", 
        "kibana_name": "logging-kibana"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Checking for session_secret] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:47
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging_kibana : Checking for oauth_secret] ********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:51
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "exists": false
    }
}

TASK [openshift_logging_kibana : Generate session secret] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:56
changed: [openshift] => {
    "changed": true, 
    "checksum": "e2cb7f419b4b2497855b4d071d457c4d0768f187", 
    "dest": "/etc/origin/logging/session_secret", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "8ba9a1e6d88c65a9253e945fc72c6938", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 200, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026557.3-250313022054616/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Generate oauth secret] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:64
changed: [openshift] => {
    "changed": true, 
    "checksum": "dadc9c52fa549b04e3ab5298b70a86ecbf1964a6", 
    "dest": "/etc/origin/logging/oauth_secret", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "0b507e53d57b8751a4f0f524817a6449", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "system_u:object_r:etc_t:s0", 
    "size": 64, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026557.64-210548021901621/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Retrieving the cert to use when generating secrets for the logging components] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:71
ok: [openshift] => (item={u'name': u'ca_file', u'file': u'ca.crt'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
    "encoding": "base64", 
    "item": {
        "file": "ca.crt", 
        "name": "ca_file"
    }, 
    "source": "/etc/origin/logging/ca.crt"
}
ok: [openshift] => (item={u'name': u'kibana_internal_key', u'file': u'kibana-internal.key'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdzlKbmM1aWlXQ1hvb2xaRThjeUtNYndjYk9WQTVXN2RnWngvQkFIalV5VzNCRHdxCnc2QURpQkdNUXVubmlUM0tNN1QvWE1FOUJKcWRFbnBnSDlEZ0h4VVYyRzJpeGV1dy9UYzh4dTN0VjZ3bDFFTmUKa21id21LL0JpUXo1bTZMekw2N0pKSXJNV3ZGLzhTVXdxWXUrdG1WTllkUkhRMVM2NXFCMm54cTlVaXpZa3NWZwp6R254WnJwWFRGdHBXWU03UlVQY05paXp4MkpDUXpENlJCZk1KcmhmSjU0UmdtdCsrUmxTWnNrSERuVFlmVno0Cnloa2lHd2wrZXlYVjdtKzI5OVFXODJNNVZXbk91SWxuenk2dWlvczg2MmdEeGtYNFN1dlF6cjVrVXVCRTh6UVQKdWQ1Q25nbmVQeHRDK0p6aGh2aitMbGlDTHJmakV0M3VDbnVzdVFJREFRQUJBb0lCQUMvalZZOU1aVzgvSGd3TgpJdUVyRHlTb2h4OE9WWGVHVEQ4MUlVNitrM1F6QmZYUUxQa1lPYVRkL2JvamI0NzlnR1J5di94VCtYTVZ5SkhMClFaU21SUmowMUJuUWZmeis3WktQSHByYmRxNTVGK3MzNm5uTkdNdU9ON3NuWkNkdWl5QS9FMHVQY2ZoUGxPVkoKSGdSb2UyUXBrK24zdXRHNWQ5ZU1xRld5ZVZnTjE5cGJ4RGo5cXdJc0F1QVRoZXY2NkJ5RU5ia0hBalNwUXh5OApqZnB4U05HYkFQeUlyaE15bElUTjVFL2tVUThtZnBiaVJCNUw1ZEx4Y0hVdUV4T1N1L3BoVHNHbHFvZzZ1Z2hyCkRoNkdXVTdKdXhhVXdSWURVWVdCZTlZN3R4bnRDMTdvRngybkh0ZWFLVVZNL3R0QnhXTzRGcEdWdHArd05DWFgKN1kybWtIRUNnWUVBN1dLQkZiSkJFVHhTMWdLS2RrYW9jOG40QzN0UUNBbnprUno4SGhFK2pqM3NWSVdhMU5GSwo0MHpvYkNZbGoxbVZMMGFNRlgvRXhibFZKdUJUeEJqTHQ0c0g4UWwzcnNQTDV6cXFCZ09IS0hOT0t4WlpXU0RoClY1akN5aXhnMENYWEpSWGtaYWY1Ukh4OTVqamNmK1ZmVU13NHJlL0syK2xUVlRCOXcwcy9aNTBDZ1lFQTB5MkcKaGpqbzF2ZTh6T01xeEJiQk9JeE5aSktWSmlta2JjNlJWU21jcXZtZWxybE9naUR4dHp4bnMzcEdkUlJhQ043aQovRHN6NnUxNitnSkE1d3hpSGJsYUNOUGF4RWYyQWVTUDEvaUgrczI2NEJaVFh6bWQwWk9HSElHQUN4WEN0ZVpzCnE3R3JTa2RyTmN6K1Z4ZmxCRFR2K1BEZk5GeGhlVnV3a2Fid1JNMENnWUFob2JRU0k5Z09IaExLV1QvT3RBSUgKclpTajQ4dVo1L2NIRnlrdEJWcHNncDlwOWlmU2dQaHdMYVE4TmtkYUFPSWUrWXE5UFFYZTBYd2I3dXBQUFFnWApyKzNuRzJWR2NGMENISkNjRzRIWm5FUjEzRXU2VWVzN0l0eHVFazJCRCt0a1NLNjhMRWtuaDgrNmYxSFdHT1FoCktWM2FYci9KMmt2QkpRYWNWWnZkRFFLQmdIWHgyRStBMjNLc3ZQTUY4VzNCYzd0Y2lTRWh5VkdlTW1ydDB6Zm8KWE1zVlRhREs4SEUzYXlrM0FJZzZRNzNDVnlMYk1aVkVqNU9uZldiQytYTFI5TDY3TmZEMk9ob2tRdDc5dytVUQp0d201KzZTS2M1Z2N1dXRCWk1icEFSV3YycWJ6Z2tCR28rK0JnZnh6ZUo0MkJ6L1pQU29oaGRoZHZFY21rYkJYCnQ1d0ZBb0dCQU5saVJrcG93dHpPWG5ocFlLSlhZR3lFeUc0RjJ1K1ZuWUtJdEVBVk8weDlaaWtuTGYzc3llK0gKQTgrRVJGbmFKZTI3dk9udERmaGRRTW8vSVJ5RlllVEswdGJWWTlUSVBoTGNoVk5PVm5zY2F3V3BsQmRzNlVTcQpQTnNuREE1NTJ5UWVRZWh5Zm9zKzE3K2pmc3RlaWw0OVNKWXd5M3loTFcyQVFsQTZwMDQ1Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", 
    "encoding": "base64", 
    "item": {
        "file": "kibana-internal.key", 
        "name": "kibana_internal_key"
    }, 
    "source": "/etc/origin/logging/kibana-internal.key"
}
ok: [openshift] => (item={u'name': u'kibana_internal_cert', u'file': u'kibana-internal.crt'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURUakNDQWphZ0F3SUJBZ0lCQWpBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME5Wb1hEVEU1TURZd09URTJOREUwTmxvdwpGakVVTUJJR0ExVUVBeE1MSUd0cFltRnVZUzF2Y0hNd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFERDBtZHptS0pZSmVpaVZrVHh6SW94dkJ4czVVRGxidDJCbkg4RUFlTlRKYmNFUENyRG9BT0kKRVl4QzZlZUpQY296dFA5Y3dUMEVtcDBTZW1BZjBPQWZGUlhZYmFMRjY3RDlOenpHN2UxWHJDWFVRMTZTWnZDWQpyOEdKRFBtYm92TXZyc2traXN4YThYL3hKVENwaTc2MlpVMWgxRWREVkxybW9IYWZHcjFTTE5pU3hXRE1hZkZtCnVsZE1XMmxaZ3p0RlE5dzJLTFBIWWtKRE1QcEVGOHdtdUY4bm5oR0NhMzc1R1ZKbXlRY09kTmg5WFBqS0dTSWIKQ1g1N0pkWHViN2IzMUJiell6bFZhYzY0aVdmUExxNktpenpyYUFQR1JmaEs2OURPdm1SUzRFVHpOQk81M2tLZQpDZDQvRzBMNG5PR0crUDR1V0lJdXQrTVMzZTRLZTZ5NUFnTUJBQUdqZ1o0d2dac3dEZ1lEVlIwUEFRSC9CQVFECkFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01CTUF3R0ExVWRFd0VCL3dRQ01BQXdaZ1lEVlIwUkJGOHcKWFlJTElHdHBZbUZ1WVMxdmNIT0NMQ0JyYVdKaGJtRXRiM0J6TG5KdmRYUmxjaTVrWldaaGRXeDBMbk4yWXk1agpiSFZ6ZEdWeUxteHZZMkZzZ2hnZ2EybGlZVzVoTGpFeU55NHdMakF1TVM1NGFYQXVhVytDQm10cFltRnVZVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQVl4empONGk3V01qbHFad0lRUTlsOWZ1Y0NKbXFxWU9PQVNoeU1QaTIKNGJreW8xUkgxTEozNGljMk04NkpvZHNoaUoxTHNCODdiVllOdHFMRnAwMlE5VlpkTFdWZzBtaWNtZDNNUlEvSApHVCsyUktjeXVidThoV3VFdDJoRUNma20vSkpQRFRKVEF0UFhlWG54bEh4dXE2cFNlb053dnJxMWRSSkd6d1pjCmhTcDUxdTBnYnZHR1YzYVJGQkIzdHNDQlBmdWZYWUQ0djFRYTFLNkRWVmJqOWJyYUhrM0pTSktRWHRRelRmY28KeEFNbWNVa2w1TWRwblViVC9XeTlRRjJlaVZmNlJvU29HZWNzMmh6Uk5iTUE4cTEwMm5Bd2phbndpUWZ4OUE1NgowZ1EwY1RrdjlpN1R0VUg1MG55TW9oUUpGUkU2Zk1RSDFmdEJ6aHVKaUN3QVpnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQzJqQ0NBY0tnQXdJQkFnSUJBVEFOQmdrcWhraUc5dzBCQVFzRkFEQWVNUnd3R2dZRFZRUURFeE5zYjJkbgphVzVuTFhOcFoyNWxjaTEwWlhOME1CNFhEVEUzTURZd09URTJOREUwTTFvWERUSXlNRFl3T0RFMk5ERTBORm93CkhqRWNNQm9HQTFVRUF4TVRiRzluWjJsdVp5MXphV2R1WlhJdGRHVnpkRENDQVNJd0RRWUpLb1pJaHZjTkFRRUIKQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2kyUUNrY0R5M0dDeStTc3RhdVplemZxMHlPUjJ3SzE1aE5BaURTcEY3MQpDSnlXOWdic083Z25nbys2N2x2YzhaU1hPelhKQm1Ba0VIRlRDSXdzSFlRZjVjamZtRjV3aTVrcGVVR2ZlMFpRCmRSdHdHOWVCZTkrSytWcmpCemhNTFpBNWNjWFJMTlBPelBTMUc5YXJ3ZHNKMVlPQWRXN1c3anMycXBRd0RLMnEKUU4rbHdJOHV6L2RYMUtacnEzWWdVeUdFYml4ZDRDWXdJT1VQZXZBTFpQa2tsU0U2dmFpVUJGRjVuU0JZcnNnZgpZSXE4S0svRlRRdytQekg4aXVmVGw2VjZkeWRrSVhtWmFuRlN0YTEzZVpGM00zWXB3TkxuTUZrODY4L1Q1QXhKCi9YdFA2M2o5S2c1L29sd0xMODJBR0VqMHpYMU05T3orY052Q05vQnZqUGtDQXdFQUFhTWpNQ0V3RGdZRFZSMFAKQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUpZOQpEdGxaTGdiQ2FBTDBvNjFJV2hpWjQ2VEc2cTZGY1hIQXdiT2wyaEtIMmlQeHA5dk9zeG1HZmJlL1ZyQlA2TGcyCnVTdkxlSUh3dVBqMDUzQy90Q29kUFNLejJCcGd3Tlk1anROaFhFd0lGdGl1YUhWVElmZGdZaDc4Zk1tRzBycVIKSTBQbUhJT21XaXU0OTJnQlJveGlYTWZkRzZrMnhxeUNTeVB4dUQ3U1NlTVhPdkpZa0Ewc0IyVWdjWUJtNjFMZwp3MWJxeWg0Ri9ZTnVTZUZEa2FaMUNOU2Y1Q3gyL0Q5TGxBUWNKS1NSdTRzWkNwL1VaYnNqUGNwZC9oa0E5RHBpCkFJMnNBdTJlUzFETHd0MkoxWXlGaGt1cFdSMnBHYmZ0ZnNZSE9rZ2N2eno3MVJZMXg2Q2hyWE9IdjkwdWNNcFIKeG9RM2I3YThUeDhYWGpxS0tzdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
    "encoding": "base64", 
    "item": {
        "file": "kibana-internal.crt", 
        "name": "kibana_internal_cert"
    }, 
    "source": "/etc/origin/logging/kibana-internal.crt"
}
ok: [openshift] => (item={u'name': u'server_tls', u'file': u'server-tls.json'}) => {
    "changed": false, 
    "content": "Ly8gU2VlIGZvciBhdmFpbGFibGUgb3B0aW9uczogaHR0cHM6Ly9ub2RlanMub3JnL2FwaS90bHMuaHRtbCN0bHNfdGxzX2NyZWF0ZXNlcnZlcl9vcHRpb25zX3NlY3VyZWNvbm5lY3Rpb25saXN0ZW5lcgp0bHNfb3B0aW9ucyA9IHsKCWNpcGhlcnM6ICdrRUVDREg6K2tFRUNESCtTSEE6a0VESDora0VESCtTSEE6K2tFREgrQ0FNRUxMSUE6a0VDREg6K2tFQ0RIK1NIQTprUlNBOitrUlNBK1NIQTora1JTQStDQU1FTExJQTohYU5VTEw6IWVOVUxMOiFTU0x2MjohUkM0OiFERVM6IUVYUDohU0VFRDohSURFQTorM0RFUycsCglob25vckNpcGhlck9yZGVyOiB0cnVlCn0K", 
    "encoding": "base64", 
    "item": {
        "file": "server-tls.json", 
        "name": "server_tls"
    }, 
    "source": "/etc/origin/logging/server-tls.json"
}
ok: [openshift] => (item={u'name': u'session_secret', u'file': u'session_secret'}) => {
    "changed": false, 
    "content": "UU4wM3FyVU1qUFJyUVkwQ25Pcm1ZMkVKdjhtV2FHSXlEc0lhdGltY0Zzb1VGYWlEZHQ2U3BzYTdMUXRERmRudm5EQmlkTVA5bGU1c1BqZ3BsU2d0NmlnSTJlTTV1MjlDQmZqeGdPeGRMVmRsNE8yVnJSQXhBbkczZ3BVOHpHYlNrcjNtd2JzbzgwZUlrekIyTzFpSlZDNEJtVjZVY3VURVJJZFJjQXVtY0ZrdHFiV09odFphNzVqV2JlOW1MZnFhc3h0YW1TSlM=", 
    "encoding": "base64", 
    "item": {
        "file": "session_secret", 
        "name": "session_secret"
    }, 
    "source": "/etc/origin/logging/session_secret"
}
ok: [openshift] => (item={u'name': u'oauth_secret', u'file': u'oauth_secret'}) => {
    "changed": false, 
    "content": "QW9oSGlWbTZIU3hGQmRzemYwYlZVaWNOa2VkaGFiVTNvZ1lrWTdNb1paTEFhMTgycXhldmpreUI0cjh1ZWdWbA==", 
    "encoding": "base64", 
    "item": {
        "file": "oauth_secret", 
        "name": "oauth_secret"
    }, 
    "source": "/etc/origin/logging/oauth_secret"
}

TASK [openshift_logging_kibana : Set logging-kibana service] *******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:84
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.209.208", 
        "cmd": "/bin/oc get service logging-kibana -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:40Z", 
                    "name": "logging-kibana", 
                    "namespace": "logging", 
                    "resourceVersion": "1679", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-kibana", 
                    "uid": "a6e7fbc1-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.209.208", 
                    "ports": [
                        {
                            "port": 443, 
                            "protocol": "TCP", 
                            "targetPort": "oaproxy"
                        }
                    ], 
                    "selector": {
                        "component": "kibana", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:101
 [WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_key | trim | length
> 0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:106
 [WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_cert | trim | length
> 0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:111
 [WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_ca | trim | length >
0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:116
ok: [openshift] => {
    "ansible_facts": {
        "kibana_ca": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Generating Kibana route template] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:121
ok: [openshift] => {
    "changed": false, 
    "checksum": "eb1d0a84f4f092d705d3c2370375704117017c44", 
    "dest": "/tmp/openshift-logging-ansible-8ijN9q/templates/kibana-route.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "06cc6afa6989839c9ff01c31934e7727", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2714, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026560.97-210911998785489/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Setting Kibana route] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:141
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get route logging-kibana -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Route", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:41Z", 
                    "labels": {
                        "component": "support", 
                        "logging-infra": "support", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-kibana", 
                    "namespace": "logging", 
                    "resourceVersion": "1685", 
                    "selfLink": "/oapi/v1/namespaces/logging/routes/logging-kibana", 
                    "uid": "a7f779b9-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "host": "kibana.router.default.svc.cluster.local", 
                    "tls": {
                        "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dn\naW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFow\nHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiDSpF71\nCJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQ\ndRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2q\nQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgf\nYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ\n/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0P\nAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9\nDtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2\nuSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqR\nI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lg\nw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9Dpi\nAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpR\nxoQ3b7a8Tx8XXjqKKsw=\n-----END CERTIFICATE-----\n", 
                        "destinationCACertificate": "-----BEGIN CERTIFICATE-----\nMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dn\naW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFow\nHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiDSpF71\nCJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQ\ndRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2q\nQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgf\nYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ\n/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0P\nAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9\nDtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2\nuSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqR\nI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lg\nw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9Dpi\nAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpR\nxoQ3b7a8Tx8XXjqKKsw=\n-----END CERTIFICATE-----\n", 
                        "insecureEdgeTerminationPolicy": "Redirect", 
                        "termination": "reencrypt"
                    }, 
                    "to": {
                        "kind": "Service", 
                        "name": "logging-kibana", 
                        "weight": 100
                    }, 
                    "wildcardPolicy": "None"
                }, 
                "status": {
                    "ingress": [
                        {
                            "conditions": [
                                {
                                    "lastTransitionTime": "2017-06-09T16:42:41Z", 
                                    "status": "True", 
                                    "type": "Admitted"
                                }
                            ], 
                            "host": "kibana.router.default.svc.cluster.local", 
                            "routerName": "router", 
                            "wildcardPolicy": "None"
                        }
                    ]
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Get current oauthclient hostnames] ************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:151
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get oauthclient kibana-proxy -o json -n logging", 
        "results": [
            {}
        ], 
        "returncode": 0, 
        "stderr": "Error from server (NotFound): oauthclients.oauth.openshift.io \"kibana-proxy\" not found\n", 
        "stdout": ""
    }, 
    "state": "list"
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:159
ok: [openshift] => {
    "ansible_facts": {
        "proxy_hostnames": [
            "https://kibana.router.default.svc.cluster.local"
        ]
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Create oauth-client template] *****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:162
changed: [openshift] => {
    "changed": true, 
    "checksum": "d2f71c58b353878f99b21658c24a8f6cb594ac3d", 
    "dest": "/tmp/openshift-logging-ansible-8ijN9q/templates/oauth-client.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "accb938880e10dad34015404a741873b", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 328, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026563.4-11835215125561/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Set kibana-proxy oauth-client] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:170
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get oauthclient kibana-proxy -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "OAuthClient", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:45Z", 
                    "labels": {
                        "logging-infra": "support"
                    }, 
                    "name": "kibana-proxy", 
                    "resourceVersion": "1697", 
                    "selfLink": "/oapi/v1/oauthclients/kibana-proxy", 
                    "uid": "a9d71d4c-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "redirectURIs": [
                    "https://kibana.router.default.svc.cluster.local"
                ], 
                "scopeRestrictions": [
                    {
                        "literals": [
                            "user:info", 
                            "user:check-access", 
                            "user:list-projects"
                        ]
                    }
                ], 
                "secret": "AohHiVm6HSxFBdszf0bVUicNkedhabU3ogYkY7MoZZLAa182qxevjkyB4r8uegVl"
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Set Kibana secret] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:181
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-kibana ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.kibana.key cert=/etc/origin/logging/system.logging.kibana.crt -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Set Kibana Proxy secret] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:195
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-kibana-proxy oauth-secret=/tmp/oauth-secret-CA9ETy session-secret=/tmp/session-secret-x8WgZ0 server-key=/tmp/server-key-qNUyAp server-cert=/tmp/server-cert-op1Kfw server-tls.json=/tmp/server-tls.json-5pKO8S -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Generate Kibana DC template] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:221
changed: [openshift] => {
    "changed": true, 
    "checksum": "efcf69f3659d666fbfb4895989fcfe2be5e4172b", 
    "dest": "/tmp/openshift-logging-ansible-8ijN9q/templates/kibana-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "5ba737c82a0879a212d3ee3372dfa39e", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 3735, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026568.22-182601042740767/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Set Kibana DC] ********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:240
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-kibana -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:49Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "kibana", 
                        "logging-infra": "kibana", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-kibana", 
                    "namespace": "logging", 
                    "resourceVersion": "1713", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-kibana", 
                    "uid": "ac9e1be9-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "kibana", 
                        "logging-infra": "kibana", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Rolling"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "kibana", 
                                "logging-infra": "kibana", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-kibana"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "KIBANA_MEMORY_LIMIT", 
                                            "valueFrom": {
                                                "resourceFieldRef": {
                                                    "containerName": "kibana", 
                                                    "divisor": "0", 
                                                    "resource": "limits.memory"
                                                }
                                            }
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-kibana:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "kibana", 
                                    "readinessProbe": {
                                        "exec": {
                                            "command": [
                                                "/usr/share/kibana/probe/readiness.sh"
                                            ]
                                        }, 
                                        "failureThreshold": 3, 
                                        "initialDelaySeconds": 5, 
                                        "periodSeconds": 5, 
                                        "successThreshold": 1, 
                                        "timeoutSeconds": 4
                                    }, 
                                    "resources": {
                                        "limits": {
                                            "memory": "736Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/kibana/keys", 
                                            "name": "kibana", 
                                            "readOnly": true
                                        }
                                    ]
                                }, 
                                {
                                    "env": [
                                        {
                                            "name": "OAP_BACKEND_URL", 
                                            "value": "http://localhost:5601"
                                        }, 
                                        {
                                            "name": "OAP_AUTH_MODE", 
                                            "value": "oauth2"
                                        }, 
                                        {
                                            "name": "OAP_TRANSFORM", 
                                            "value": "user_header,token_header"
                                        }, 
                                        {
                                            "name": "OAP_OAUTH_ID", 
                                            "value": "kibana-proxy"
                                        }, 
                                        {
                                            "name": "OAP_MASTER_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "OAP_PUBLIC_MASTER_URL", 
                                            "value": "https://172.18.7.3:8443"
                                        }, 
                                        {
                                            "name": "OAP_LOGOUT_REDIRECT", 
                                            "value": "https://172.18.7.3:8443/console/logout"
                                        }, 
                                        {
                                            "name": "OAP_MASTER_CA_FILE", 
                                            "value": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
                                        }, 
                                        {
                                            "name": "OAP_DEBUG", 
                                            "value": "False"
                                        }, 
                                        {
                                            "name": "OAP_OAUTH_SECRET_FILE", 
                                            "value": "/secret/oauth-secret"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_CERT_FILE", 
                                            "value": "/secret/server-cert"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_KEY_FILE", 
                                            "value": "/secret/server-key"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_TLS_FILE", 
                                            "value": "/secret/server-tls.json"
                                        }, 
                                        {
                                            "name": "OAP_SESSION_SECRET_FILE", 
                                            "value": "/secret/session-secret"
                                        }, 
                                        {
                                            "name": "OCP_AUTH_PROXY_MEMORY_LIMIT", 
                                            "valueFrom": {
                                                "resourceFieldRef": {
                                                    "containerName": "kibana-proxy", 
                                                    "divisor": "0", 
                                                    "resource": "limits.memory"
                                                }
                                            }
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-auth-proxy:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "kibana-proxy", 
                                    "ports": [
                                        {
                                            "containerPort": 3000, 
                                            "name": "oaproxy", 
                                            "protocol": "TCP"
                                        }
                                    ], 
                                    "resources": {
                                        "limits": {
                                            "memory": "96Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/secret", 
                                            "name": "kibana-proxy", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-kibana", 
                            "serviceAccountName": "aggregated-logging-kibana", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "kibana", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-kibana"
                                    }
                                }, 
                                {
                                    "name": "kibana-proxy", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-kibana-proxy"
                                    }
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:42:49Z", 
                            "lastUpdateTime": "2017-06-09T16:42:49Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:42:49Z", 
                            "lastUpdateTime": "2017-06-09T16:42:49Z", 
                            "message": "replication controller \"logging-kibana-1\" is waiting for pod \"logging-kibana-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Delete temp directory] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:252
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-8ijN9q", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:166
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml

TASK [openshift_logging_kibana : fail] *****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "kibana_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : fail] *****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Create temp directory for doing work in] ******
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:7
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.003609", 
    "end": "2017-06-09 12:42:51.961162", 
    "rc": 0, 
    "start": "2017-06-09 12:42:51.957553"
}

STDOUT:

/tmp/openshift-logging-ansible-Pq0tEz

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:12
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-Pq0tEz"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Create templates subdirectory] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:16
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-Pq0tEz/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_kibana : Create Kibana service account] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:26
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Create Kibana service account] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:34
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-kibana -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-kibana-dockercfg-38337"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:36Z", 
                    "name": "aggregated-logging-kibana", 
                    "namespace": "logging", 
                    "resourceVersion": "1668", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-kibana", 
                    "uid": "a49a6153-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-kibana-dockercfg-38337"
                    }, 
                    {
                        "name": "aggregated-logging-kibana-token-cj4v7"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:42
ok: [openshift] => {
    "ansible_facts": {
        "kibana_component": "kibana-ops", 
        "kibana_name": "logging-kibana-ops"
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Checking for session_secret] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:47
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "atime": 1497026558.630553, 
        "attr_flags": "", 
        "attributes": [], 
        "block_size": 4096, 
        "blocks": 8, 
        "charset": "us-ascii", 
        "checksum": "e2cb7f419b4b2497855b4d071d457c4d0768f187", 
        "ctime": 1497026557.4455621, 
        "dev": 51714, 
        "device_type": 0, 
        "executable": false, 
        "exists": true, 
        "gid": 0, 
        "gr_name": "root", 
        "inode": 176166799, 
        "isblk": false, 
        "ischr": false, 
        "isdir": false, 
        "isfifo": false, 
        "isgid": false, 
        "islnk": false, 
        "isreg": true, 
        "issock": false, 
        "isuid": false, 
        "md5": "8ba9a1e6d88c65a9253e945fc72c6938", 
        "mimetype": "text/plain", 
        "mode": "0644", 
        "mtime": 1497026557.3175633, 
        "nlink": 1, 
        "path": "/etc/origin/logging/session_secret", 
        "pw_name": "root", 
        "readable": true, 
        "rgrp": true, 
        "roth": true, 
        "rusr": true, 
        "size": 200, 
        "uid": 0, 
        "version": "430413523", 
        "wgrp": false, 
        "woth": false, 
        "writeable": true, 
        "wusr": true, 
        "xgrp": false, 
        "xoth": false, 
        "xusr": false
    }
}

TASK [openshift_logging_kibana : Checking for oauth_secret] ********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:51
ok: [openshift] => {
    "changed": false, 
    "stat": {
        "atime": 1497026558.773552, 
        "attr_flags": "", 
        "attributes": [], 
        "block_size": 4096, 
        "blocks": 8, 
        "charset": "us-ascii", 
        "checksum": "dadc9c52fa549b04e3ab5298b70a86ecbf1964a6", 
        "ctime": 1497026557.8115594, 
        "dev": 51714, 
        "device_type": 0, 
        "executable": false, 
        "exists": true, 
        "gid": 0, 
        "gr_name": "root", 
        "inode": 185097600, 
        "isblk": false, 
        "ischr": false, 
        "isdir": false, 
        "isfifo": false, 
        "isgid": false, 
        "islnk": false, 
        "isreg": true, 
        "issock": false, 
        "isuid": false, 
        "md5": "0b507e53d57b8751a4f0f524817a6449", 
        "mimetype": "text/plain", 
        "mode": "0644", 
        "mtime": 1497026557.6745605, 
        "nlink": 1, 
        "path": "/etc/origin/logging/oauth_secret", 
        "pw_name": "root", 
        "readable": true, 
        "rgrp": true, 
        "roth": true, 
        "rusr": true, 
        "size": 64, 
        "uid": 0, 
        "version": "18446744072205045025", 
        "wgrp": false, 
        "woth": false, 
        "writeable": true, 
        "wusr": true, 
        "xgrp": false, 
        "xoth": false, 
        "xusr": false
    }
}

TASK [openshift_logging_kibana : Generate session secret] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:56
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Generate oauth secret] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:64
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Retrieving the cert to use when generating secrets for the logging components] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:71
ok: [openshift] => (item={u'name': u'ca_file', u'file': u'ca.crt'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
    "encoding": "base64", 
    "item": {
        "file": "ca.crt", 
        "name": "ca_file"
    }, 
    "source": "/etc/origin/logging/ca.crt"
}
ok: [openshift] => (item={u'name': u'kibana_internal_key', u'file': u'kibana-internal.key'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdzlKbmM1aWlXQ1hvb2xaRThjeUtNYndjYk9WQTVXN2RnWngvQkFIalV5VzNCRHdxCnc2QURpQkdNUXVubmlUM0tNN1QvWE1FOUJKcWRFbnBnSDlEZ0h4VVYyRzJpeGV1dy9UYzh4dTN0VjZ3bDFFTmUKa21id21LL0JpUXo1bTZMekw2N0pKSXJNV3ZGLzhTVXdxWXUrdG1WTllkUkhRMVM2NXFCMm54cTlVaXpZa3NWZwp6R254WnJwWFRGdHBXWU03UlVQY05paXp4MkpDUXpENlJCZk1KcmhmSjU0UmdtdCsrUmxTWnNrSERuVFlmVno0Cnloa2lHd2wrZXlYVjdtKzI5OVFXODJNNVZXbk91SWxuenk2dWlvczg2MmdEeGtYNFN1dlF6cjVrVXVCRTh6UVQKdWQ1Q25nbmVQeHRDK0p6aGh2aitMbGlDTHJmakV0M3VDbnVzdVFJREFRQUJBb0lCQUMvalZZOU1aVzgvSGd3TgpJdUVyRHlTb2h4OE9WWGVHVEQ4MUlVNitrM1F6QmZYUUxQa1lPYVRkL2JvamI0NzlnR1J5di94VCtYTVZ5SkhMClFaU21SUmowMUJuUWZmeis3WktQSHByYmRxNTVGK3MzNm5uTkdNdU9ON3NuWkNkdWl5QS9FMHVQY2ZoUGxPVkoKSGdSb2UyUXBrK24zdXRHNWQ5ZU1xRld5ZVZnTjE5cGJ4RGo5cXdJc0F1QVRoZXY2NkJ5RU5ia0hBalNwUXh5OApqZnB4U05HYkFQeUlyaE15bElUTjVFL2tVUThtZnBiaVJCNUw1ZEx4Y0hVdUV4T1N1L3BoVHNHbHFvZzZ1Z2hyCkRoNkdXVTdKdXhhVXdSWURVWVdCZTlZN3R4bnRDMTdvRngybkh0ZWFLVVZNL3R0QnhXTzRGcEdWdHArd05DWFgKN1kybWtIRUNnWUVBN1dLQkZiSkJFVHhTMWdLS2RrYW9jOG40QzN0UUNBbnprUno4SGhFK2pqM3NWSVdhMU5GSwo0MHpvYkNZbGoxbVZMMGFNRlgvRXhibFZKdUJUeEJqTHQ0c0g4UWwzcnNQTDV6cXFCZ09IS0hOT0t4WlpXU0RoClY1akN5aXhnMENYWEpSWGtaYWY1Ukh4OTVqamNmK1ZmVU13NHJlL0syK2xUVlRCOXcwcy9aNTBDZ1lFQTB5MkcKaGpqbzF2ZTh6T01xeEJiQk9JeE5aSktWSmlta2JjNlJWU21jcXZtZWxybE9naUR4dHp4bnMzcEdkUlJhQ043aQovRHN6NnUxNitnSkE1d3hpSGJsYUNOUGF4RWYyQWVTUDEvaUgrczI2NEJaVFh6bWQwWk9HSElHQUN4WEN0ZVpzCnE3R3JTa2RyTmN6K1Z4ZmxCRFR2K1BEZk5GeGhlVnV3a2Fid1JNMENnWUFob2JRU0k5Z09IaExLV1QvT3RBSUgKclpTajQ4dVo1L2NIRnlrdEJWcHNncDlwOWlmU2dQaHdMYVE4TmtkYUFPSWUrWXE5UFFYZTBYd2I3dXBQUFFnWApyKzNuRzJWR2NGMENISkNjRzRIWm5FUjEzRXU2VWVzN0l0eHVFazJCRCt0a1NLNjhMRWtuaDgrNmYxSFdHT1FoCktWM2FYci9KMmt2QkpRYWNWWnZkRFFLQmdIWHgyRStBMjNLc3ZQTUY4VzNCYzd0Y2lTRWh5VkdlTW1ydDB6Zm8KWE1zVlRhREs4SEUzYXlrM0FJZzZRNzNDVnlMYk1aVkVqNU9uZldiQytYTFI5TDY3TmZEMk9ob2tRdDc5dytVUQp0d201KzZTS2M1Z2N1dXRCWk1icEFSV3YycWJ6Z2tCR28rK0JnZnh6ZUo0MkJ6L1pQU29oaGRoZHZFY21rYkJYCnQ1d0ZBb0dCQU5saVJrcG93dHpPWG5ocFlLSlhZR3lFeUc0RjJ1K1ZuWUtJdEVBVk8weDlaaWtuTGYzc3llK0gKQTgrRVJGbmFKZTI3dk9udERmaGRRTW8vSVJ5RlllVEswdGJWWTlUSVBoTGNoVk5PVm5zY2F3V3BsQmRzNlVTcQpQTnNuREE1NTJ5UWVRZWh5Zm9zKzE3K2pmc3RlaWw0OVNKWXd5M3loTFcyQVFsQTZwMDQ1Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", 
    "encoding": "base64", 
    "item": {
        "file": "kibana-internal.key", 
        "name": "kibana_internal_key"
    }, 
    "source": "/etc/origin/logging/kibana-internal.key"
}
ok: [openshift] => (item={u'name': u'kibana_internal_cert', u'file': u'kibana-internal.crt'}) => {
    "changed": false, 
    "content": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURUakNDQWphZ0F3SUJBZ0lCQWpBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME5Wb1hEVEU1TURZd09URTJOREUwTmxvdwpGakVVTUJJR0ExVUVBeE1MSUd0cFltRnVZUzF2Y0hNd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFERDBtZHptS0pZSmVpaVZrVHh6SW94dkJ4czVVRGxidDJCbkg4RUFlTlRKYmNFUENyRG9BT0kKRVl4QzZlZUpQY296dFA5Y3dUMEVtcDBTZW1BZjBPQWZGUlhZYmFMRjY3RDlOenpHN2UxWHJDWFVRMTZTWnZDWQpyOEdKRFBtYm92TXZyc2traXN4YThYL3hKVENwaTc2MlpVMWgxRWREVkxybW9IYWZHcjFTTE5pU3hXRE1hZkZtCnVsZE1XMmxaZ3p0RlE5dzJLTFBIWWtKRE1QcEVGOHdtdUY4bm5oR0NhMzc1R1ZKbXlRY09kTmg5WFBqS0dTSWIKQ1g1N0pkWHViN2IzMUJiell6bFZhYzY0aVdmUExxNktpenpyYUFQR1JmaEs2OURPdm1SUzRFVHpOQk81M2tLZQpDZDQvRzBMNG5PR0crUDR1V0lJdXQrTVMzZTRLZTZ5NUFnTUJBQUdqZ1o0d2dac3dEZ1lEVlIwUEFRSC9CQVFECkFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01CTUF3R0ExVWRFd0VCL3dRQ01BQXdaZ1lEVlIwUkJGOHcKWFlJTElHdHBZbUZ1WVMxdmNIT0NMQ0JyYVdKaGJtRXRiM0J6TG5KdmRYUmxjaTVrWldaaGRXeDBMbk4yWXk1agpiSFZ6ZEdWeUxteHZZMkZzZ2hnZ2EybGlZVzVoTGpFeU55NHdMakF1TVM1NGFYQXVhVytDQm10cFltRnVZVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQVl4empONGk3V01qbHFad0lRUTlsOWZ1Y0NKbXFxWU9PQVNoeU1QaTIKNGJreW8xUkgxTEozNGljMk04NkpvZHNoaUoxTHNCODdiVllOdHFMRnAwMlE5VlpkTFdWZzBtaWNtZDNNUlEvSApHVCsyUktjeXVidThoV3VFdDJoRUNma20vSkpQRFRKVEF0UFhlWG54bEh4dXE2cFNlb053dnJxMWRSSkd6d1pjCmhTcDUxdTBnYnZHR1YzYVJGQkIzdHNDQlBmdWZYWUQ0djFRYTFLNkRWVmJqOWJyYUhrM0pTSktRWHRRelRmY28KeEFNbWNVa2w1TWRwblViVC9XeTlRRjJlaVZmNlJvU29HZWNzMmh6Uk5iTUE4cTEwMm5Bd2phbndpUWZ4OUE1NgowZ1EwY1RrdjlpN1R0VUg1MG55TW9oUUpGUkU2Zk1RSDFmdEJ6aHVKaUN3QVpnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQzJqQ0NBY0tnQXdJQkFnSUJBVEFOQmdrcWhraUc5dzBCQVFzRkFEQWVNUnd3R2dZRFZRUURFeE5zYjJkbgphVzVuTFhOcFoyNWxjaTEwWlhOME1CNFhEVEUzTURZd09URTJOREUwTTFvWERUSXlNRFl3T0RFMk5ERTBORm93CkhqRWNNQm9HQTFVRUF4TVRiRzluWjJsdVp5MXphV2R1WlhJdGRHVnpkRENDQVNJd0RRWUpLb1pJaHZjTkFRRUIKQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2kyUUNrY0R5M0dDeStTc3RhdVplemZxMHlPUjJ3SzE1aE5BaURTcEY3MQpDSnlXOWdic083Z25nbys2N2x2YzhaU1hPelhKQm1Ba0VIRlRDSXdzSFlRZjVjamZtRjV3aTVrcGVVR2ZlMFpRCmRSdHdHOWVCZTkrSytWcmpCemhNTFpBNWNjWFJMTlBPelBTMUc5YXJ3ZHNKMVlPQWRXN1c3anMycXBRd0RLMnEKUU4rbHdJOHV6L2RYMUtacnEzWWdVeUdFYml4ZDRDWXdJT1VQZXZBTFpQa2tsU0U2dmFpVUJGRjVuU0JZcnNnZgpZSXE4S0svRlRRdytQekg4aXVmVGw2VjZkeWRrSVhtWmFuRlN0YTEzZVpGM00zWXB3TkxuTUZrODY4L1Q1QXhKCi9YdFA2M2o5S2c1L29sd0xMODJBR0VqMHpYMU05T3orY052Q05vQnZqUGtDQXdFQUFhTWpNQ0V3RGdZRFZSMFAKQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUpZOQpEdGxaTGdiQ2FBTDBvNjFJV2hpWjQ2VEc2cTZGY1hIQXdiT2wyaEtIMmlQeHA5dk9zeG1HZmJlL1ZyQlA2TGcyCnVTdkxlSUh3dVBqMDUzQy90Q29kUFNLejJCcGd3Tlk1anROaFhFd0lGdGl1YUhWVElmZGdZaDc4Zk1tRzBycVIKSTBQbUhJT21XaXU0OTJnQlJveGlYTWZkRzZrMnhxeUNTeVB4dUQ3U1NlTVhPdkpZa0Ewc0IyVWdjWUJtNjFMZwp3MWJxeWg0Ri9ZTnVTZUZEa2FaMUNOU2Y1Q3gyL0Q5TGxBUWNKS1NSdTRzWkNwL1VaYnNqUGNwZC9oa0E5RHBpCkFJMnNBdTJlUzFETHd0MkoxWXlGaGt1cFdSMnBHYmZ0ZnNZSE9rZ2N2eno3MVJZMXg2Q2hyWE9IdjkwdWNNcFIKeG9RM2I3YThUeDhYWGpxS0tzdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
    "encoding": "base64", 
    "item": {
        "file": "kibana-internal.crt", 
        "name": "kibana_internal_cert"
    }, 
    "source": "/etc/origin/logging/kibana-internal.crt"
}
ok: [openshift] => (item={u'name': u'server_tls', u'file': u'server-tls.json'}) => {
    "changed": false, 
    "content": "Ly8gU2VlIGZvciBhdmFpbGFibGUgb3B0aW9uczogaHR0cHM6Ly9ub2RlanMub3JnL2FwaS90bHMuaHRtbCN0bHNfdGxzX2NyZWF0ZXNlcnZlcl9vcHRpb25zX3NlY3VyZWNvbm5lY3Rpb25saXN0ZW5lcgp0bHNfb3B0aW9ucyA9IHsKCWNpcGhlcnM6ICdrRUVDREg6K2tFRUNESCtTSEE6a0VESDora0VESCtTSEE6K2tFREgrQ0FNRUxMSUE6a0VDREg6K2tFQ0RIK1NIQTprUlNBOitrUlNBK1NIQTora1JTQStDQU1FTExJQTohYU5VTEw6IWVOVUxMOiFTU0x2MjohUkM0OiFERVM6IUVYUDohU0VFRDohSURFQTorM0RFUycsCglob25vckNpcGhlck9yZGVyOiB0cnVlCn0K", 
    "encoding": "base64", 
    "item": {
        "file": "server-tls.json", 
        "name": "server_tls"
    }, 
    "source": "/etc/origin/logging/server-tls.json"
}
ok: [openshift] => (item={u'name': u'session_secret', u'file': u'session_secret'}) => {
    "changed": false, 
    "content": "UU4wM3FyVU1qUFJyUVkwQ25Pcm1ZMkVKdjhtV2FHSXlEc0lhdGltY0Zzb1VGYWlEZHQ2U3BzYTdMUXRERmRudm5EQmlkTVA5bGU1c1BqZ3BsU2d0NmlnSTJlTTV1MjlDQmZqeGdPeGRMVmRsNE8yVnJSQXhBbkczZ3BVOHpHYlNrcjNtd2JzbzgwZUlrekIyTzFpSlZDNEJtVjZVY3VURVJJZFJjQXVtY0ZrdHFiV09odFphNzVqV2JlOW1MZnFhc3h0YW1TSlM=", 
    "encoding": "base64", 
    "item": {
        "file": "session_secret", 
        "name": "session_secret"
    }, 
    "source": "/etc/origin/logging/session_secret"
}
ok: [openshift] => (item={u'name': u'oauth_secret', u'file': u'oauth_secret'}) => {
    "changed": false, 
    "content": "QW9oSGlWbTZIU3hGQmRzemYwYlZVaWNOa2VkaGFiVTNvZ1lrWTdNb1paTEFhMTgycXhldmpreUI0cjh1ZWdWbA==", 
    "encoding": "base64", 
    "item": {
        "file": "oauth_secret", 
        "name": "oauth_secret"
    }, 
    "source": "/etc/origin/logging/oauth_secret"
}

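Note: the slurp results above return each file base64-encoded in the "content" field. A minimal sketch for inspecting one of them offline, assuming the payload has been saved to a hypothetical local file kibana-internal.crt.b64:

# Decode the base64 "content" field, then print subject and validity of the
# first certificate in the bundle (the payload holds two concatenated PEM certs).
base64 -d kibana-internal.crt.b64 > kibana-internal.crt
openssl x509 -in kibana-internal.crt -noout -subject -dates
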
TASK [openshift_logging_kibana : Set logging-kibana-ops service] ***************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:84
changed: [openshift] => {
    "changed": true, 
    "results": {
        "clusterip": "172.30.193.185", 
        "cmd": "/bin/oc get service logging-kibana-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Service", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:55Z", 
                    "name": "logging-kibana-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1734", 
                    "selfLink": "/api/v1/namespaces/logging/services/logging-kibana-ops", 
                    "uid": "b03f3b3c-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "clusterIP": "172.30.193.185", 
                    "ports": [
                        {
                            "port": 443, 
                            "protocol": "TCP", 
                            "targetPort": "oaproxy"
                        }
                    ], 
                    "selector": {
                        "component": "kibana-ops", 
                        "provider": "openshift"
                    }, 
                    "sessionAffinity": "None", 
                    "type": "ClusterIP"
                }, 
                "status": {
                    "loadBalancer": {}
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

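Note: the service created above listens on 443 and forwards to the named port "oaproxy"; the container port that name resolves to appears later in the kibana-proxy container spec. A sketch for confirming the ClusterIP and port mapping, assuming access to the same cluster and kubeconfig:

# Show the service's cluster IP and its port -> targetPort pairing.
oc get service logging-kibana-ops -n logging \
  -o jsonpath='{.spec.clusterIP} {.spec.ports[0].port}->{.spec.ports[0].targetPort}{"\n"}'
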
TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:101
 [WARNING]: when statements should not include jinja2 templating delimiters such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_key | trim | length > 0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:106
 [WARNING]: when statements should not include jinja2 templating delimiters such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_cert | trim | length > 0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:111
 [WARNING]: when statements should not include jinja2 templating delimiters such as {{ }} or {% %}. Found: {{ openshift_logging_kibana_ca | trim | length > 0 }}
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:116
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_kibana : Generating Kibana route template] *************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:121
ok: [openshift] => {
    "changed": false, 
    "checksum": "be8f5e0383b9d104957c096549143d3e4c8aa838", 
    "dest": "/tmp/openshift-logging-ansible-Pq0tEz/templates/kibana-route.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "e9c81cb8c9583b2fe5b10a2e7e2d2006", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2726, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026576.44-157027343132276/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Setting Kibana route] *************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:141
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get route logging-kibana-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "Route", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:57Z", 
                    "labels": {
                        "component": "support", 
                        "logging-infra": "support", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-kibana-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1742", 
                    "selfLink": "/oapi/v1/namespaces/logging/routes/logging-kibana-ops", 
                    "uid": "b1427d0f-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "host": "kibana-ops.router.default.svc.cluster.local", 
                    "tls": {
                        "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dn\naW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFow\nHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiDSpF71\nCJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQ\ndRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2q\nQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgf\nYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ\n/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0P\nAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9\nDtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2\nuSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqR\nI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lg\nw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9Dpi\nAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpR\nxoQ3b7a8Tx8XXjqKKsw=\n-----END CERTIFICATE-----\n", 
                        "destinationCACertificate": "-----BEGIN CERTIFICATE-----\nMIIC2jCCAcKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNsb2dn\naW5nLXNpZ25lci10ZXN0MB4XDTE3MDYwOTE2NDE0M1oXDTIyMDYwODE2NDE0NFow\nHjEcMBoGA1UEAxMTbG9nZ2luZy1zaWduZXItdGVzdDCCASIwDQYJKoZIhvcNAQEB\nBQADggEPADCCAQoCggEBAKi2QCkcDy3GCy+SstauZezfq0yOR2wK15hNAiDSpF71\nCJyW9gbsO7gngo+67lvc8ZSXOzXJBmAkEHFTCIwsHYQf5cjfmF5wi5kpeUGfe0ZQ\ndRtwG9eBe9+K+VrjBzhMLZA5ccXRLNPOzPS1G9arwdsJ1YOAdW7W7js2qpQwDK2q\nQN+lwI8uz/dX1KZrq3YgUyGEbixd4CYwIOUPevALZPkklSE6vaiUBFF5nSBYrsgf\nYIq8KK/FTQw+PzH8iufTl6V6dydkIXmZanFSta13eZF3M3YpwNLnMFk868/T5AxJ\n/XtP63j9Kg5/olwLL82AGEj0zX1M9Oz+cNvCNoBvjPkCAwEAAaMjMCEwDgYDVR0P\nAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJY9\nDtlZLgbCaAL0o61IWhiZ46TG6q6FcXHAwbOl2hKH2iPxp9vOsxmGfbe/VrBP6Lg2\nuSvLeIHwuPj053C/tCodPSKz2BpgwNY5jtNhXEwIFtiuaHVTIfdgYh78fMmG0rqR\nI0PmHIOmWiu492gBRoxiXMfdG6k2xqyCSyPxuD7SSeMXOvJYkA0sB2UgcYBm61Lg\nw1bqyh4F/YNuSeFDkaZ1CNSf5Cx2/D9LlAQcJKSRu4sZCp/UZbsjPcpd/hkA9Dpi\nAI2sAu2eS1DLwt2J1YyFhkupWR2pGbftfsYHOkgcvzz71RY1x6ChrXOHv90ucMpR\nxoQ3b7a8Tx8XXjqKKsw=\n-----END CERTIFICATE-----\n", 
                        "insecureEdgeTerminationPolicy": "Redirect", 
                        "termination": "reencrypt"
                    }, 
                    "to": {
                        "kind": "Service", 
                        "name": "logging-kibana-ops", 
                        "weight": 100
                    }, 
                    "wildcardPolicy": "None"
                }, 
                "status": {
                    "ingress": [
                        {
                            "conditions": [
                                {
                                    "lastTransitionTime": "2017-06-09T16:42:57Z", 
                                    "status": "True", 
                                    "type": "Admitted"
                                }
                            ], 
                            "host": "kibana-ops.router.default.svc.cluster.local", 
                            "routerName": "router", 
                            "wildcardPolicy": "None"
                        }
                    ]
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

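Note: the route above uses re-encrypt TLS termination and redirects insecure traffic. A sketch for pulling just the host and termination policy back out, assuming the same cluster:

# Print the route host and TLS termination type.
oc get route logging-kibana-ops -n logging \
  -o jsonpath='{.spec.host} {.spec.tls.termination}{"\n"}'
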
TASK [openshift_logging_kibana : Get current oauthclient hostnames] ************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:151
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get oauthclient kibana-proxy -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "OAuthClient", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:45Z", 
                    "labels": {
                        "logging-infra": "support"
                    }, 
                    "name": "kibana-proxy", 
                    "resourceVersion": "1697", 
                    "selfLink": "/oapi/v1/oauthclients/kibana-proxy", 
                    "uid": "a9d71d4c-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "redirectURIs": [
                    "https://kibana.router.default.svc.cluster.local"
                ], 
                "scopeRestrictions": [
                    {
                        "literals": [
                            "user:info", 
                            "user:check-access", 
                            "user:list-projects"
                        ]
                    }
                ], 
                "secret": "AohHiVm6HSxFBdszf0bVUicNkedhabU3ogYkY7MoZZLAa182qxevjkyB4r8uegVl"
            }
        ], 
        "returncode": 0
    }, 
    "state": "list"
}

TASK [openshift_logging_kibana : set_fact] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:159
ok: [openshift] => {
    "ansible_facts": {
        "proxy_hostnames": [
            "https://kibana.router.default.svc.cluster.local", 
            "https://kibana-ops.router.default.svc.cluster.local"
        ]
    }, 
    "changed": false
}

TASK [openshift_logging_kibana : Create oauth-client template] *****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:162
changed: [openshift] => {
    "changed": true, 
    "checksum": "c3dd27a856444d5e336286c63536c9315fe5c3a9", 
    "dest": "/tmp/openshift-logging-ansible-Pq0tEz/templates/oauth-client.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "2f6e3dc97d7a83c5f3ff7fbd07cd15ea", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 382, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026578.71-63810181955765/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Set kibana-proxy oauth-client] ****************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:170
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get oauthclient kibana-proxy -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "OAuthClient", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:42:45Z", 
                    "labels": {
                        "logging-infra": "support"
                    }, 
                    "name": "kibana-proxy", 
                    "resourceVersion": "1750", 
                    "selfLink": "/oapi/v1/oauthclients/kibana-proxy", 
                    "uid": "a9d71d4c-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "redirectURIs": [
                    "https://kibana.router.default.svc.cluster.local", 
                    "https://kibana-ops.router.default.svc.cluster.local"
                ], 
                "scopeRestrictions": [
                    {
                        "literals": [
                            "user:info", 
                            "user:check-access", 
                            "user:list-projects"
                        ]
                    }
                ], 
                "secret": "AohHiVm6HSxFBdszf0bVUicNkedhabU3ogYkY7MoZZLAa182qxevjkyB4r8uegVl"
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

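Note: after this task the kibana-proxy OAuthClient carries both the kibana and kibana-ops redirect URIs. A sketch for verifying that list directly, assuming the same cluster:

# List the redirect URIs registered on the kibana-proxy OAuth client.
oc get oauthclient kibana-proxy -n logging -o jsonpath='{.redirectURIs}{"\n"}'
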
TASK [openshift_logging_kibana : Set Kibana secret] ****************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:181
ok: [openshift] => {
    "changed": false, 
    "results": {
        "apiVersion": "v1", 
        "data": {
            "ca": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
            "cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURSVENDQWkyZ0F3SUJBZ0lCQXpBTkJna3Foa2lHOXcwQkFRVUZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFMU1Gb1hEVEU1TURZd09URTJOREUxTUZvdwpSakVRTUE0R0ExVUVDZ3dIVEc5bloybHVaekVTTUJBR0ExVUVDd3dKVDNCbGJsTm9hV1owTVI0d0hBWURWUVFECkRCVnplWE4wWlcwdWJHOW5aMmx1Wnk1cmFXSmhibUV3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXcKZ2dFS0FvSUJBUURDQ0hHOWVJVmI1WDBZQmI3Z2hEY2FNL0h3MEN2YmFHNnVEeVIyVXhCMDR0bWFiWllEaXdLRQpxcjVaTXhUMWo4b0pnY2RBQU5vVGpSWVN3S3N2bkhyN2tpQTQvZnVOUVdJN1h3aXVOeVhpeDRBZWEzejdZSzF0ClN6bS9rR3ZiSC95Y0NyVnVCRE1HUVRlQWhHbUlqQUVVM2hwSWZKYktRRU5pM3FVMldPWFpCc05ZZGpoOWpJUEQKUFdxNE1zUVJwZG1PZWVrZ1kvRnNiUlNkakJGNGJWRHMrSUdZWHllN3NDa3EvMUh3QkQ5YUdISWRUVWt3L3czcQpIMTlyZVF1d0dpaHlTNnZmbk84Mk1UdGdYM1hoYmpLM2lBTFNYa0JhaTloRGhiQ0cxVzhPTmlNeWdrQ3drSTRjCndNSkpTaExxekVKd1VTSFZqOElnMmtISVpzT1Mwa21SQWdNQkFBR2paakJrTUE0R0ExVWREd0VCL3dRRUF3SUYKb0RBSkJnTlZIUk1FQWpBQU1CMEdBMVVkSlFRV01CUUdDQ3NHQVFVRkJ3TUJCZ2dyQmdFRkJRY0RBakFkQmdOVgpIUTRFRmdRVUN5U3hFemNQbkMrMnp0QXVLOGFyRWgwaWFLSXdDUVlEVlIwakJBSXdBREFOQmdrcWhraUc5dzBCCkFRVUZBQU9DQVFFQUhWQ2lhdTZNcmVMNmZwWnpDcHJvL1ZyMGtST2w1bUNlQ1JLT3k0eXpYSE9JaGtnMmIraC8KcG5aZm5uSjB0NWtheTd3VmhLWDJmM2R5czhFMkhua0J1TDdOVE8ySTNYQUtuZTJKamNyemwwZCtVb1hUVTlwWApIVGxiM2djdWNuUVNBdWkxSXZ4NENFRFNnN2g5Tko0ZXFOT2U0VWVMV2o2RjF6Q09VSlJHUG8rcWhMSjMvWWkwCldZallaUmxzM2xMdzhkdm9LR2pBOGNnZERSZlgvM0pEZ0E3V0RUZ0RzdXJOaGE5WG1FVW1URWJLZ1N1dUREeUcKQXE3Vy92V1lCL3o3Qy9Dc3NXRFN1WkFJeFFxVW03TC9ValhCeXhobXdUaGU2bU1Oc1JmVWllanhBNTdaUy9jNgpKVkFPNEZJSnhOMXBtc0NiM0ZiWnc4d0xrSzlqdGU3TE9nPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
            "key": "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRRENDSEc5ZUlWYjVYMFkKQmI3Z2hEY2FNL0h3MEN2YmFHNnVEeVIyVXhCMDR0bWFiWllEaXdLRXFyNVpNeFQxajhvSmdjZEFBTm9UalJZUwp3S3N2bkhyN2tpQTQvZnVOUVdJN1h3aXVOeVhpeDRBZWEzejdZSzF0U3ptL2tHdmJIL3ljQ3JWdUJETUdRVGVBCmhHbUlqQUVVM2hwSWZKYktRRU5pM3FVMldPWFpCc05ZZGpoOWpJUERQV3E0TXNRUnBkbU9lZWtnWS9Gc2JSU2QKakJGNGJWRHMrSUdZWHllN3NDa3EvMUh3QkQ5YUdISWRUVWt3L3czcUgxOXJlUXV3R2loeVM2dmZuTzgyTVR0ZwpYM1hoYmpLM2lBTFNYa0JhaTloRGhiQ0cxVzhPTmlNeWdrQ3drSTRjd01KSlNoTHF6RUp3VVNIVmo4SWcya0hJClpzT1Mwa21SQWdNQkFBRUNnZ0VBQzBQc1BxRHB0VDBzdHlGV1JMakliamdIMndCckczRUR4QTFUQTUrcysreFQKei9oKzFqUFM4SnJ0TkVhWDkrM2lRQTBONFhSck5PbGpGN0doL3NDcUQwTWJwZE54TUhGVVg3Z1R5L0dkdkgzeQp2VkZtVHozOHM3VERaSkRoQWpib05yTVVkNCs4MnFjY3J0U0JRQkZJMnNwT1lsa0JkRHFvN3V1WVVnQjFUOVZQCnpuaVY4T216NzhIeVNmOTliOW1oY2x4VlJJNndTZC8vbWJScUV6ajR2bks0QVRhV05hd28wQkxpQnpYVE40VzAKdGtyM0tjVzBlMTA3WE5DRkl4YTNjbFF5TXVCelVPL00zZm4vWGx5MEs4Vi8yYkVUSFJ2eXZCc2xKSEhxQnZZNwppKzRKL29WRzNDcnlYMHNqWXNkOUdIMnZKU3AyM1gvTEljVkY3MGpjRlFLQmdRRGhkUnNla3RHK2IrZUt1VkhhCnY3VkNKelZqQzRZZm04S3VsdDZkY0dGeDNobnJjRUIxcGtGclMvVTgwaDJZSEIwbXNhUDBXc0NILzIraDhQR0YKQ1NkQmtTdU9OQ1dGaHhUUEhNZVg4Unl6YitLU04rMjIvdUg5MFVxZzBObkdRd1FJcldsU01EbVZRVnhqblZhTwplMHVYWVNVSWt2czI4anp2TERNSTN4RklUd0tCZ1FEY1VZaXdkSzFrYVZnR1dWRk9qYTRJR1FrMU4rd1g0Z0VvCldiTXYxWVFhVGEyQ1VGQkFvZU5mVThwY2o5RFRhL0JoSnV4UHZjTmlRUFZrcW9JREpYSGo2b0Jib1RJOGNpWEoKdXhkb0NraVVrMXU1L2lkSUpzYisrYjJSVWN5cWpTVnJ0eit3b2luTk9zN1dhZXBTVVUxb3lqbWxlMEdWNGMzSgovbmlDV2ZMNEh3S0JnUUNxVEVtYTAyM0kra0FjUWhVNU5YdVZwNFlrOVV4dlRibEZKVUtzQzNvZmg1TnVpcEMyCkRVbjI0UjR4dmNpSzRCbUk0NytqVUhXRGhJZHUvRStDbk1KaFBXVjMrWjJwK2JuWEk3Y0Q1eVUxeUxxTWxlQmkKVEs2cGlWQmtucjBRakFoZEFaZS9kQnZheU04ZmszWVJFOEtxYXIrTmRYQjZOWW1mWTNSOUsvbWk4d0tCZ0duMApsRVk0ZHNNWm5vbS9Ic2ttZmhISzZKQzlpaithSTNCaVl6aVgrT1lsZ1ZKd2lZZnIwMHVmSW0wTmhTS0lnSnhZCmVjR0h3NVZYSkN6N2kzY3o1T2YxZVRHWC9tN3ZmcGpiNzAvTDBrZ1I3a1V1bzNtaG5JWTNXU0d3TXp1NmtjcVIKNElGMEc2SmxxOStXSkdqZVROb2VsMGFzVmtoY3hmbGs5bFBLS24yRkFvR0FMUCs5ZHMxS1p5eFFhdTlSZ3RxRwpxbEhUeUVvelc0aUQ4L29EbktrMHlYQWswdmdBQXJSRU5IUzBJNmlBYU90SGJLa2lwMkFoMkV4bW9ORnZLbWQwCmNzY3lPL0NDR3Vubms5bTlxVHVzUUh3VCtwVHV4U3BNMXB5UEgyZEhaTFY2SFdkRlhseVhRYXNhdUthck0zRzUKckpTN05pL0NNUXZXaDRrSWZOWjhNeWM9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K"
        }, 
        "kind": "Secret", 
        "metadata": {
            "creationTimestamp": null, 
            "name": "logging-kibana"
        }, 
        "type": "Opaque"
    }, 
    "state": "present"
}

TASK [openshift_logging_kibana : Set Kibana Proxy secret] **********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:195
ok: [openshift] => {
    "changed": false, 
    "results": {
        "apiVersion": "v1", 
        "data": {
            "oauth-secret": "QW9oSGlWbTZIU3hGQmRzemYwYlZVaWNOa2VkaGFiVTNvZ1lrWTdNb1paTEFhMTgycXhldmpreUI0cjh1ZWdWbA==", 
            "server-cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURUakNDQWphZ0F3SUJBZ0lCQWpBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME5Wb1hEVEU1TURZd09URTJOREUwTmxvdwpGakVVTUJJR0ExVUVBeE1MSUd0cFltRnVZUzF2Y0hNd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtBb0lCQVFERDBtZHptS0pZSmVpaVZrVHh6SW94dkJ4czVVRGxidDJCbkg4RUFlTlRKYmNFUENyRG9BT0kKRVl4QzZlZUpQY296dFA5Y3dUMEVtcDBTZW1BZjBPQWZGUlhZYmFMRjY3RDlOenpHN2UxWHJDWFVRMTZTWnZDWQpyOEdKRFBtYm92TXZyc2traXN4YThYL3hKVENwaTc2MlpVMWgxRWREVkxybW9IYWZHcjFTTE5pU3hXRE1hZkZtCnVsZE1XMmxaZ3p0RlE5dzJLTFBIWWtKRE1QcEVGOHdtdUY4bm5oR0NhMzc1R1ZKbXlRY09kTmg5WFBqS0dTSWIKQ1g1N0pkWHViN2IzMUJiell6bFZhYzY0aVdmUExxNktpenpyYUFQR1JmaEs2OURPdm1SUzRFVHpOQk81M2tLZQpDZDQvRzBMNG5PR0crUDR1V0lJdXQrTVMzZTRLZTZ5NUFnTUJBQUdqZ1o0d2dac3dEZ1lEVlIwUEFRSC9CQVFECkFnV2dNQk1HQTFVZEpRUU1NQW9HQ0NzR0FRVUZCd01CTUF3R0ExVWRFd0VCL3dRQ01BQXdaZ1lEVlIwUkJGOHcKWFlJTElHdHBZbUZ1WVMxdmNIT0NMQ0JyYVdKaGJtRXRiM0J6TG5KdmRYUmxjaTVrWldaaGRXeDBMbk4yWXk1agpiSFZ6ZEdWeUxteHZZMkZzZ2hnZ2EybGlZVzVoTGpFeU55NHdMakF1TVM1NGFYQXVhVytDQm10cFltRnVZVEFOCkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQVl4empONGk3V01qbHFad0lRUTlsOWZ1Y0NKbXFxWU9PQVNoeU1QaTIKNGJreW8xUkgxTEozNGljMk04NkpvZHNoaUoxTHNCODdiVllOdHFMRnAwMlE5VlpkTFdWZzBtaWNtZDNNUlEvSApHVCsyUktjeXVidThoV3VFdDJoRUNma20vSkpQRFRKVEF0UFhlWG54bEh4dXE2cFNlb053dnJxMWRSSkd6d1pjCmhTcDUxdTBnYnZHR1YzYVJGQkIzdHNDQlBmdWZYWUQ0djFRYTFLNkRWVmJqOWJyYUhrM0pTSktRWHRRelRmY28KeEFNbWNVa2w1TWRwblViVC9XeTlRRjJlaVZmNlJvU29HZWNzMmh6Uk5iTUE4cTEwMm5Bd2phbndpUWZ4OUE1NgowZ1EwY1RrdjlpN1R0VUg1MG55TW9oUUpGUkU2Zk1RSDFmdEJ6aHVKaUN3QVpnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJQzJqQ0NBY0tnQXdJQkFnSUJBVEFOQmdrcWhraUc5dzBCQVFzRkFEQWVNUnd3R2dZRFZRUURFeE5zYjJkbgphVzVuTFhOcFoyNWxjaTEwWlhOME1CNFhEVEUzTURZd09URTJOREUwTTFvWERUSXlNRFl3T0RFMk5ERTBORm93CkhqRWNNQm9HQTFVRUF4TVRiRzluWjJsdVp5MXphV2R1WlhJdGRHVnpkRENDQVNJd0RRWUpLb1pJaHZjTkFRRUIKQlFBRGdnRVBBRENDQVFvQ2dnRUJBS2kyUUNrY0R5M0dDeStTc3RhdVplemZxMHlPUjJ3SzE1aE5BaURTcEY3MQpDSnlXOWdic083Z25nbys2N2x2YzhaU1hPelhKQm1Ba0VIRlRDSXdzSFlRZjVjamZtRjV3aTVrcGVVR2ZlMFpRCmRSdHdHOWVCZTkrSytWcmpCemhNTFpBNWNjWFJMTlBPelBTMUc5YXJ3ZHNKMVlPQWRXN1c3anMycXBRd0RLMnEKUU4rbHdJOHV6L2RYMUtacnEzWWdVeUdFYml4ZDRDWXdJT1VQZXZBTFpQa2tsU0U2dmFpVUJGRjVuU0JZcnNnZgpZSXE4S0svRlRRdytQekg4aXVmVGw2VjZkeWRrSVhtWmFuRlN0YTEzZVpGM00zWXB3TkxuTUZrODY4L1Q1QXhKCi9YdFA2M2o5S2c1L29sd0xMODJBR0VqMHpYMU05T3orY052Q05vQnZqUGtDQXdFQUFhTWpNQ0V3RGdZRFZSMFAKQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCL3dRRk1BTUJBZjh3RFFZSktvWklodmNOQVFFTEJRQURnZ0VCQUpZOQpEdGxaTGdiQ2FBTDBvNjFJV2hpWjQ2VEc2cTZGY1hIQXdiT2wyaEtIMmlQeHA5dk9zeG1HZmJlL1ZyQlA2TGcyCnVTdkxlSUh3dVBqMDUzQy90Q29kUFNLejJCcGd3Tlk1anROaFhFd0lGdGl1YUhWVElmZGdZaDc4Zk1tRzBycVIKSTBQbUhJT21XaXU0OTJnQlJveGlYTWZkRzZrMnhxeUNTeVB4dUQ3U1NlTVhPdkpZa0Ewc0IyVWdjWUJtNjFMZwp3MWJxeWg0Ri9ZTnVTZUZEa2FaMUNOU2Y1Q3gyL0Q5TGxBUWNKS1NSdTRzWkNwL1VaYnNqUGNwZC9oa0E5RHBpCkFJMnNBdTJlUzFETHd0MkoxWXlGaGt1cFdSMnBHYmZ0ZnNZSE9rZ2N2eno3MVJZMXg2Q2hyWE9IdjkwdWNNcFIKeG9RM2I3YThUeDhYWGpxS0tzdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
            "server-key": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdzlKbmM1aWlXQ1hvb2xaRThjeUtNYndjYk9WQTVXN2RnWngvQkFIalV5VzNCRHdxCnc2QURpQkdNUXVubmlUM0tNN1QvWE1FOUJKcWRFbnBnSDlEZ0h4VVYyRzJpeGV1dy9UYzh4dTN0VjZ3bDFFTmUKa21id21LL0JpUXo1bTZMekw2N0pKSXJNV3ZGLzhTVXdxWXUrdG1WTllkUkhRMVM2NXFCMm54cTlVaXpZa3NWZwp6R254WnJwWFRGdHBXWU03UlVQY05paXp4MkpDUXpENlJCZk1KcmhmSjU0UmdtdCsrUmxTWnNrSERuVFlmVno0Cnloa2lHd2wrZXlYVjdtKzI5OVFXODJNNVZXbk91SWxuenk2dWlvczg2MmdEeGtYNFN1dlF6cjVrVXVCRTh6UVQKdWQ1Q25nbmVQeHRDK0p6aGh2aitMbGlDTHJmakV0M3VDbnVzdVFJREFRQUJBb0lCQUMvalZZOU1aVzgvSGd3TgpJdUVyRHlTb2h4OE9WWGVHVEQ4MUlVNitrM1F6QmZYUUxQa1lPYVRkL2JvamI0NzlnR1J5di94VCtYTVZ5SkhMClFaU21SUmowMUJuUWZmeis3WktQSHByYmRxNTVGK3MzNm5uTkdNdU9ON3NuWkNkdWl5QS9FMHVQY2ZoUGxPVkoKSGdSb2UyUXBrK24zdXRHNWQ5ZU1xRld5ZVZnTjE5cGJ4RGo5cXdJc0F1QVRoZXY2NkJ5RU5ia0hBalNwUXh5OApqZnB4U05HYkFQeUlyaE15bElUTjVFL2tVUThtZnBiaVJCNUw1ZEx4Y0hVdUV4T1N1L3BoVHNHbHFvZzZ1Z2hyCkRoNkdXVTdKdXhhVXdSWURVWVdCZTlZN3R4bnRDMTdvRngybkh0ZWFLVVZNL3R0QnhXTzRGcEdWdHArd05DWFgKN1kybWtIRUNnWUVBN1dLQkZiSkJFVHhTMWdLS2RrYW9jOG40QzN0UUNBbnprUno4SGhFK2pqM3NWSVdhMU5GSwo0MHpvYkNZbGoxbVZMMGFNRlgvRXhibFZKdUJUeEJqTHQ0c0g4UWwzcnNQTDV6cXFCZ09IS0hOT0t4WlpXU0RoClY1akN5aXhnMENYWEpSWGtaYWY1Ukh4OTVqamNmK1ZmVU13NHJlL0syK2xUVlRCOXcwcy9aNTBDZ1lFQTB5MkcKaGpqbzF2ZTh6T01xeEJiQk9JeE5aSktWSmlta2JjNlJWU21jcXZtZWxybE9naUR4dHp4bnMzcEdkUlJhQ043aQovRHN6NnUxNitnSkE1d3hpSGJsYUNOUGF4RWYyQWVTUDEvaUgrczI2NEJaVFh6bWQwWk9HSElHQUN4WEN0ZVpzCnE3R3JTa2RyTmN6K1Z4ZmxCRFR2K1BEZk5GeGhlVnV3a2Fid1JNMENnWUFob2JRU0k5Z09IaExLV1QvT3RBSUgKclpTajQ4dVo1L2NIRnlrdEJWcHNncDlwOWlmU2dQaHdMYVE4TmtkYUFPSWUrWXE5UFFYZTBYd2I3dXBQUFFnWApyKzNuRzJWR2NGMENISkNjRzRIWm5FUjEzRXU2VWVzN0l0eHVFazJCRCt0a1NLNjhMRWtuaDgrNmYxSFdHT1FoCktWM2FYci9KMmt2QkpRYWNWWnZkRFFLQmdIWHgyRStBMjNLc3ZQTUY4VzNCYzd0Y2lTRWh5VkdlTW1ydDB6Zm8KWE1zVlRhREs4SEUzYXlrM0FJZzZRNzNDVnlMYk1aVkVqNU9uZldiQytYTFI5TDY3TmZEMk9ob2tRdDc5dytVUQp0d201KzZTS2M1Z2N1dXRCWk1icEFSV3YycWJ6Z2tCR28rK0JnZnh6ZUo0MkJ6L1pQU29oaGRoZHZFY21rYkJYCnQ1d0ZBb0dCQU5saVJrcG93dHpPWG5ocFlLSlhZR3lFeUc0RjJ1K1ZuWUtJdEVBVk8weDlaaWtuTGYzc3llK0gKQTgrRVJGbmFKZTI3dk9udERmaGRRTW8vSVJ5RlllVEswdGJWWTlUSVBoTGNoVk5PVm5zY2F3V3BsQmRzNlVTcQpQTnNuREE1NTJ5UWVRZWh5Zm9zKzE3K2pmc3RlaWw0OVNKWXd5M3loTFcyQVFsQTZwMDQ1Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", 
            "server-tls.json": "Ly8gU2VlIGZvciBhdmFpbGFibGUgb3B0aW9uczogaHR0cHM6Ly9ub2RlanMub3JnL2FwaS90bHMuaHRtbCN0bHNfdGxzX2NyZWF0ZXNlcnZlcl9vcHRpb25zX3NlY3VyZWNvbm5lY3Rpb25saXN0ZW5lcgp0bHNfb3B0aW9ucyA9IHsKCWNpcGhlcnM6ICdrRUVDREg6K2tFRUNESCtTSEE6a0VESDora0VESCtTSEE6K2tFREgrQ0FNRUxMSUE6a0VDREg6K2tFQ0RIK1NIQTprUlNBOitrUlNBK1NIQTora1JTQStDQU1FTExJQTohYU5VTEw6IWVOVUxMOiFTU0x2MjohUkM0OiFERVM6IUVYUDohU0VFRDohSURFQTorM0RFUycsCglob25vckNpcGhlck9yZGVyOiB0cnVlCn0K", 
            "session-secret": "UU4wM3FyVU1qUFJyUVkwQ25Pcm1ZMkVKdjhtV2FHSXlEc0lhdGltY0Zzb1VGYWlEZHQ2U3BzYTdMUXRERmRudm5EQmlkTVA5bGU1c1BqZ3BsU2d0NmlnSTJlTTV1MjlDQmZqeGdPeGRMVmRsNE8yVnJSQXhBbkczZ3BVOHpHYlNrcjNtd2JzbzgwZUlrekIyTzFpSlZDNEJtVjZVY3VURVJJZFJjQXVtY0ZrdHFiV09odFphNzVqV2JlOW1MZnFhc3h0YW1TSlM="
        }, 
        "kind": "Secret", 
        "metadata": {
            "creationTimestamp": null, 
            "name": "logging-kibana-proxy"
        }, 
        "type": "Opaque"
    }, 
    "state": "present"
}

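Note: the logging-kibana-proxy secret bundles the oauth secret, serving cert/key, the Node.js TLS options file, and the session secret. As a sketch, any single entry can be pulled back out and decoded; keys containing dots need the jsonpath escape:

# Extract and decode one entry from the secret; the "\." escapes the dot in the key name.
oc get secret logging-kibana-proxy -n logging \
  -o jsonpath='{.data.server-tls\.json}' | base64 -d
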
TASK [openshift_logging_kibana : Generate Kibana DC template] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:221
changed: [openshift] => {
    "changed": true, 
    "checksum": "0c8e2078c7cbcb0c6d3d2feea603a33829a88988", 
    "dest": "/tmp/openshift-logging-ansible-Pq0tEz/templates/kibana-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "419d137d5ea8abea2dda65147b81b0ad", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 3759, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026582.9-35805726107807/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_kibana : Set Kibana DC] ********************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:240
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-kibana-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:04Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "kibana-ops", 
                        "logging-infra": "kibana", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-kibana-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1774", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-kibana-ops", 
                    "uid": "b534d892-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "kibana-ops", 
                        "logging-infra": "kibana", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Rolling"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "kibana-ops", 
                                "logging-infra": "kibana", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-kibana-ops"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "KIBANA_MEMORY_LIMIT", 
                                            "valueFrom": {
                                                "resourceFieldRef": {
                                                    "containerName": "kibana", 
                                                    "divisor": "0", 
                                                    "resource": "limits.memory"
                                                }
                                            }
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-kibana:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "kibana", 
                                    "readinessProbe": {
                                        "exec": {
                                            "command": [
                                                "/usr/share/kibana/probe/readiness.sh"
                                            ]
                                        }, 
                                        "failureThreshold": 3, 
                                        "initialDelaySeconds": 5, 
                                        "periodSeconds": 5, 
                                        "successThreshold": 1, 
                                        "timeoutSeconds": 4
                                    }, 
                                    "resources": {
                                        "limits": {
                                            "memory": "736Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/kibana/keys", 
                                            "name": "kibana", 
                                            "readOnly": true
                                        }
                                    ]
                                }, 
                                {
                                    "env": [
                                        {
                                            "name": "OAP_BACKEND_URL", 
                                            "value": "http://localhost:5601"
                                        }, 
                                        {
                                            "name": "OAP_AUTH_MODE", 
                                            "value": "oauth2"
                                        }, 
                                        {
                                            "name": "OAP_TRANSFORM", 
                                            "value": "user_header,token_header"
                                        }, 
                                        {
                                            "name": "OAP_OAUTH_ID", 
                                            "value": "kibana-proxy"
                                        }, 
                                        {
                                            "name": "OAP_MASTER_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "OAP_PUBLIC_MASTER_URL", 
                                            "value": "https://172.18.7.3:8443"
                                        }, 
                                        {
                                            "name": "OAP_LOGOUT_REDIRECT", 
                                            "value": "https://172.18.7.3:8443/console/logout"
                                        }, 
                                        {
                                            "name": "OAP_MASTER_CA_FILE", 
                                            "value": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
                                        }, 
                                        {
                                            "name": "OAP_DEBUG", 
                                            "value": "False"
                                        }, 
                                        {
                                            "name": "OAP_OAUTH_SECRET_FILE", 
                                            "value": "/secret/oauth-secret"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_CERT_FILE", 
                                            "value": "/secret/server-cert"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_KEY_FILE", 
                                            "value": "/secret/server-key"
                                        }, 
                                        {
                                            "name": "OAP_SERVER_TLS_FILE", 
                                            "value": "/secret/server-tls.json"
                                        }, 
                                        {
                                            "name": "OAP_SESSION_SECRET_FILE", 
                                            "value": "/secret/session-secret"
                                        }, 
                                        {
                                            "name": "OCP_AUTH_PROXY_MEMORY_LIMIT", 
                                            "valueFrom": {
                                                "resourceFieldRef": {
                                                    "containerName": "kibana-proxy", 
                                                    "divisor": "0", 
                                                    "resource": "limits.memory"
                                                }
                                            }
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-auth-proxy:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "kibana-proxy", 
                                    "ports": [
                                        {
                                            "containerPort": 3000, 
                                            "name": "oaproxy", 
                                            "protocol": "TCP"
                                        }
                                    ], 
                                    "resources": {
                                        "limits": {
                                            "memory": "96Mi"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/secret", 
                                            "name": "kibana-proxy", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-kibana", 
                            "serviceAccountName": "aggregated-logging-kibana", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "kibana", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-kibana"
                                    }
                                }, 
                                {
                                    "name": "kibana-proxy", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-kibana-proxy"
                                    }
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:43:04Z", 
                            "lastUpdateTime": "2017-06-09T16:43:04Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:43:04Z", 
                            "lastUpdateTime": "2017-06-09T16:43:04Z", 
                            "message": "replication controller \"logging-kibana-ops-1\" is waiting for pod \"logging-kibana-ops-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

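Note: the DeploymentConfig above runs two containers per pod, kibana itself and the kibana-proxy OAuth sidecar that owns the "oaproxy" port the service targets. At this point the rollout has not completed ("Deployment config does not have minimum availability"); a sketch for waiting on it, assuming the same cluster:

# Block until the first deployment of the DC finishes (or fails), then list its pods.
oc rollout status dc/logging-kibana-ops -n logging
oc get pods -n logging -l component=kibana-ops
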
TASK [openshift_logging_kibana : Delete temp directory] ************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:252
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-Pq0tEz", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:195
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "curator_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create temp directory for doing work in] *****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:5
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002655", 
    "end": "2017-06-09 12:43:06.901980", 
    "rc": 0, 
    "start": "2017-06-09 12:43:06.899325"
}

STDOUT:

/tmp/openshift-logging-ansible-7ebS4F

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:10
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-7ebS4F"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Create templates subdirectory] ***************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:14
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-7ebS4F/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:24
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:32
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-curator-dockercfg-66nqb"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:07Z", 
                    "name": "aggregated-logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1788", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-curator", 
                    "uid": "b7780771-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-curator-token-1n8bk"
                    }, 
                    {
                        "name": "aggregated-logging-curator-dockercfg-66nqb"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "checksum": "9008efd9a8892dcc42c28c6dfb6708527880a6d8", 
    "dest": "/tmp/openshift-logging-ansible-7ebS4F/curator.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "5498c5fd98f3dd06e34b20eb1f55dc12", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 320, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026588.5-210223337910038/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:47
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Set Curator configmap] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:53
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "config.yaml": "# Logging example curator config file\n\n# uncomment and use this to override the defaults from env vars\n#.defaults:\n#  delete:\n#    days: 30\n#  runhour: 0\n#  runminute: 0\n\n# to keep ops logs for a different duration:\n#.operations:\n#  delete:\n#    weeks: 8\n\n# example for a normal project\n#myapp:\n#  delete:\n#    weeks: 1\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:09Z", 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1802", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-curator", 
                    "uid": "b8474ba1-4d32-11e7-ae30-0e910886c5dc"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_curator : Set Curator secret] **************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:62
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-curator ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.curator.key cert=/etc/origin/logging/system.logging.curator.crt -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}

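Note: the task above uses the older `oc secrets new` syntax to build the curator secret from the CA, key, and cert on disk. A roughly equivalent sketch using the newer `oc create secret generic` form (same files, same key names; not what this playbook version actually runs):

# Equivalent secret creation with the newer CLI syntax.
oc create secret generic logging-curator -n logging \
  --from-file=ca=/etc/origin/logging/ca.crt \
  --from-file=key=/etc/origin/logging/system.logging.curator.key \
  --from-file=cert=/etc/origin/logging/system.logging.curator.crt
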
TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:75
ok: [openshift] => {
    "ansible_facts": {
        "curator_component": "curator", 
        "curator_name": "logging-curator"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Generate Curator deploymentconfig] ***********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "7b0ce0c4925ff50af5cb333e414a3b513ca64c56", 
    "dest": "/tmp/openshift-logging-ansible-7ebS4F/templates/curator-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "489c0bccea8c7dfc264c187e07af6e1a", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2341, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026590.57-180139434365306/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : Set Curator DC] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:99
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:11Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "curator", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1822", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-curator", 
                    "uid": "b9be0055-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "curator", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "curator", 
                                "logging-infra": "curator", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-curator"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/curator/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/curator/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/curator/keys/ca"
                                        }, 
                                        {
                                            "name": "CURATOR_DEFAULT_DAYS", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_HOUR", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_MINUTE", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_TIMEZONE", 
                                            "value": "UTC"
                                        }, 
                                        {
                                            "name": "CURATOR_SCRIPT_LOG_LEVEL", 
                                            "value": "INFO"
                                        }, 
                                        {
                                            "name": "CURATOR_LOG_LEVEL", 
                                            "value": "ERROR"
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-curator:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "curator", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/curator/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/curator/settings", 
                                            "name": "config", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-curator", 
                            "serviceAccountName": "aggregated-logging-curator", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-curator"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-curator"
                                    }, 
                                    "name": "config"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:43:11Z", 
                            "lastUpdateTime": "2017-06-09T16:43:11Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:43:11Z", 
                            "lastUpdateTime": "2017-06-09T16:43:11Z", 
                            "message": "replication controller \"logging-curator-1\" is waiting for pod \"logging-curator-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
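
The deploymentconfig above drives the curator container entirely through environment variables (CURATOR_DEFAULT_DAYS, CURATOR_RUN_HOUR, CURATOR_RUN_MINUTE, CURATOR_RUN_TIMEZONE and the two log-level settings) plus the mounted logging-curator configmap. As a hedged sketch, not part of this run's output and with hypothetical values, an operator could adjust the retention or schedule on the live object with:

    # example values only; pick whatever retention/schedule is actually wanted
    oc set env dc/logging-curator CURATOR_DEFAULT_DAYS=14 CURATOR_RUN_HOUR=3 -n logging

Because the DC's only trigger is ConfigChange, editing the pod template this way should roll out a new curator deployment on its own.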

TASK [openshift_logging_curator : Delete temp directory] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:109
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-7ebS4F", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:207
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "curator_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create temp directory for doing work in] *****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:5
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002315", 
    "end": "2017-06-09 12:43:15.538534", 
    "rc": 0, 
    "start": "2017-06-09 12:43:15.536219"
}

STDOUT:

/tmp/openshift-logging-ansible-xJvK8N

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:10
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-xJvK8N"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Create templates subdirectory] ***************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:14
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-xJvK8N/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:24
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:32
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-curator-dockercfg-66nqb"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:07Z", 
                    "name": "aggregated-logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1788", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-curator", 
                    "uid": "b7780771-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-curator-token-1n8bk"
                    }, 
                    {
                        "name": "aggregated-logging-curator-dockercfg-66nqb"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
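
This task reports ok rather than changed because the aggregated-logging-curator service account already exists from the earlier (non-ops) curator pass; the role simply re-reads it. A quick way to inspect the same object outside Ansible, mirroring the command the module wraps:

    oc get sa aggregated-logging-curator -n logging -o yaml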

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "checksum": "9008efd9a8892dcc42c28c6dfb6708527880a6d8", 
    "dest": "/tmp/openshift-logging-ansible-xJvK8N/curator.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "5498c5fd98f3dd06e34b20eb1f55dc12", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 320, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026596.63-279741446659991/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:47
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Set Curator configmap] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:53
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get configmap logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "config.yaml": "# Logging example curator config file\n\n# uncomment and use this to override the defaults from env vars\n#.defaults:\n#  delete:\n#    days: 30\n#  runhour: 0\n#  runminute: 0\n\n# to keep ops logs for a different duration:\n#.operations:\n#  delete:\n#    weeks: 8\n\n# example for a normal project\n#myapp:\n#  delete:\n#    weeks: 1\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:09Z", 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1802", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-curator", 
                    "uid": "b8474ba1-4d32-11e7-ae30-0e910886c5dc"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
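
The config.yaml shipped in the logging-curator configmap is entirely commented out, so retention falls back to the CURATOR_* environment variables until per-project rules are added. A hedged sketch of enabling the example rule from the file above (the project name is the placeholder used in the shipped comments, not a project from this run):

    oc edit configmap logging-curator -n logging
    # in config.yaml, uncomment e.g.:
    #   myapp:
    #     delete:
    #       weeks: 1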

TASK [openshift_logging_curator : Set Curator secret] **************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:62
ok: [openshift] => {
    "changed": false, 
    "results": {
        "apiVersion": "v1", 
        "data": {
            "ca": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFME0xb1hEVEl5TURZd09ERTJOREUwTkZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQUtpMlFDa2NEeTNHQ3krU3N0YXVaZXpmcTB5T1Iyd0sxNWhOQWlEU3BGNzEKQ0p5VzlnYnNPN2duZ28rNjdsdmM4WlNYT3pYSkJtQWtFSEZUQ0l3c0hZUWY1Y2pmbUY1d2k1a3BlVUdmZTBaUQpkUnR3RzllQmU5K0srVnJqQnpoTUxaQTVjY1hSTE5QT3pQUzFHOWFyd2RzSjFZT0FkVzdXN2pzMnFwUXdESzJxClFOK2x3STh1ei9kWDFLWnJxM1lnVXlHRWJpeGQ0Q1l3SU9VUGV2QUxaUGtrbFNFNnZhaVVCRkY1blNCWXJzZ2YKWUlxOEtLL0ZUUXcrUHpIOGl1ZlRsNlY2ZHlka0lYbVphbkZTdGExM2VaRjNNM1lwd05Mbk1Gazg2OC9UNUF4SgovWHRQNjNqOUtnNS9vbHdMTDgyQUdFajB6WDFNOU96K2NOdkNOb0J2alBrQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKWTkKRHRsWkxnYkNhQUwwbzYxSVdoaVo0NlRHNnE2RmNYSEF3Yk9sMmhLSDJpUHhwOXZPc3htR2ZiZS9WckJQNkxnMgp1U3ZMZUlId3VQajA1M0MvdENvZFBTS3oyQnBnd05ZNWp0TmhYRXdJRnRpdWFIVlRJZmRnWWg3OGZNbUcwcnFSCkkwUG1ISU9tV2l1NDkyZ0JSb3hpWE1mZEc2azJ4cXlDU3lQeHVEN1NTZU1YT3ZKWWtBMHNCMlVnY1lCbTYxTGcKdzFicXloNEYvWU51U2VGRGthWjFDTlNmNUN4Mi9EOUxsQVFjSktTUnU0c1pDcC9VWmJzalBjcGQvaGtBOURwaQpBSTJzQXUyZVMxREx3dDJKMVl5RmhrdXBXUjJwR2JmdGZzWUhPa2djdnp6NzFSWTF4NkNoclhPSHY5MHVjTXBSCnhvUTNiN2E4VHg4WFhqcUtLc3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
            "cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURSakNDQWk2Z0F3SUJBZ0lCQkRBTkJna3Foa2lHOXcwQkFRVUZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUyTkRFMU1Gb1hEVEU1TURZd09URTJOREUxTUZvdwpSekVRTUE0R0ExVUVDZ3dIVEc5bloybHVaekVTTUJBR0ExVUVDd3dKVDNCbGJsTm9hV1owTVI4d0hRWURWUVFECkRCWnplWE4wWlcwdWJHOW5aMmx1Wnk1amRYSmhkRzl5TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEEKTUlJQkNnS0NBUUVBMi9nd0MzL1pkdVJ3QnAzVElQdHA4SDdTTXI0MEpWS05KT2Q5RDBDVXBNaktjRkhCUXVFRwphM0JxR0xweTdlVUlnREU4MXFldjdPZlp3NHJIYkVESVBVekY4RlNXUUhKcTF3bHBuYW16TGV4Qmw0b3YwQ1EzCk4zOTl5WXRQaWd1UldoNUIvSXBVNHVhWmJ4UVU5Rk5PYnJ0T1VlUTlLeHZhMkxiSXNoRDd3QXdNbHByN2NQb2IKSGFQWmRmOGR6cUFoVjRJWDU0UTFJOFcvL2laaE5zd0RrcHl6MkdGTWpPZTNPcTIxanpCQW5OU3dkRGsrekp2TgptVnhoenpGWktESUQ3dDhLUnpyTjNVM2JZOEpBTUZ1SUR3TS8zd0FLRlpzMkV3aFFnQnhnTnI0bEMxblduSC9lCm9wck1Ca0pLbjdFUFJmR1pzamgrMi9xWjZLcjVSdDA2VlFJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUMKQmFBd0NRWURWUjBUQkFJd0FEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3SFFZRApWUjBPQkJZRUZEWW5FTmhRZzZrWVVyWWdkcytZNUpwbzRPdzBNQWtHQTFVZEl3UUNNQUF3RFFZSktvWklodmNOCkFRRUZCUUFEZ2dFQkFEaFdWbVQ0amlDRUY2dlJmNGJJdjBFVnViMmJ2MWhXL1ZPSGJITkRidzZFR0V2eHVNZkQKdG1kTTFzeWszV1ZRTlRWamlIc25jeitXeHRDUi81N1BIR1dyeG0vcitsektNSFZOWG9LWG0yMkxPaVhhb0hsVgpuSzVXaTR6UCs5bHd4MHVLMTVtaldqdjNEcnA0bDFyZm9VSHpzTUVrOFNEWkFEUzRFT2RBTFZWRlRsZ0YvYU51Ck5YNWYzZlNIbDRLWTFGSzlxa3h4MlgzcU1KM090M3JteExvbVJRME5pSEJBN2RkMWRWMWk4aFdDRWJYSjZ1TWMKLzRKeWFxT0RtZ2U5N2ZYUDhnMU10T0VHcmsvOS8zZ0JoMzFGeVVpRE4wMFpCNXFyN1V6bkJkTVMrWmlaeXNoMApZSTR3RGl1Q2hrUlVRYzBDUXdnT3hOM3ZYYVZSeU1DZkNmbz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
            "key": "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRRGIrREFMZjlsMjVIQUcKbmRNZysybndmdEl5dmpRbFVvMGs1MzBQUUpTa3lNcHdVY0ZDNFFacmNHb1l1bkx0NVFpQU1UeldwNi9zNTluRAppc2RzUU1nOVRNWHdWSlpBY21yWENXbWRxYk10N0VHWGlpL1FKRGMzZjMzSmkwK0tDNUZhSGtIOGlsVGk1cGx2CkZCVDBVMDV1dTA1UjVEMHJHOXJZdHNpeUVQdkFEQXlXbXZ0dytoc2RvOWwxL3gzT29DRlhnaGZuaERVanhiLysKSm1FMnpBT1NuTFBZWVV5TTU3YzZyYldQTUVDYzFMQjBPVDdNbTgyWlhHSFBNVmtvTWdQdTN3cEhPczNkVGR0agp3a0F3VzRnUEF6L2ZBQW9WbXpZVENGQ0FIR0EydmlVTFdkYWNmOTZpbXN3R1FrcWZzUTlGOFpteU9IN2IrcG5vCnF2bEczVHBWQWdNQkFBRUNnZ0VCQUtmT3NuaFRWRWJMQmx0RjAyYjU1VnUrcEFScDdjWDdNQW5UUHloL2orWVYKOTdtdjVNdHNkMFZIUU9rVWM5RGdSSmZtK0E2WlpqQkl2MnZaVjBGU09tL3BQYWVXSC9tU1Q4YzBPd280QjZVawo4dlQwU24xeTQxekhPZUtqRUU2emZITlBXZSthYUIrZzQ3UkZrYWJDVnJkSzFQak1NWTNOZGlxVlpvM1JQdnpGCnloV09XTFNpaWZFcDhKaVM5djg3RW9RejQ0aEw1ZzJ4V2J5S084c25jS0RYVUZ5SHJYRHd6OE1qRXhyY0NBL1AKZitrbW5qWVNtQlVpdDVLd2FPNlVFZmVQUFY3aXFEQkNZQkp5d3NSeEhkd3kraUJTUFA0MFg3bjF5ZHhMM0t2ZQpMa3BENmwzWkRrcFl5bFM2Q3RTcWhkenRNMWNHV0VNcFdvcUNsL0E4cElFQ2dZRUE5aTBUV1hMdlViR3lFZG14CjZjQW5RN0MvSExrdzFXR1E3V0JLc0d1WmdKUWs3TUwyaTdldWgzeXRWbUhmaUZPZHhrQmx2NWRkVWNaelZ2VkkKMy94NlE2MkZnc1NIYk0rWTg4VWxpR3BFUXB2YzRMMEVLRzdVMnRPRTROOGl4T0o0WklzSEpsVXVJeEJrekM2UApkWFRnb0x0amUwTDR2Vmd0WDBqWTRSTmdraTBDZ1lFQTVMOWkrc1BXVWZRckhvclFBRlJEVVdCYW02eVQwVmQxCmh2S2h2dy8xUmRCZEw3UmxNVkNkSE9EOWM3Q1E2ak5YcFZ3K2RJM1Nzc0hMTjYybHVjRXF5eWRGT2k1MXB2YUMKSVF1K2pCeHI3anQwdmpFT2VOZzdJZ0tzMmRDLzkycUFQSWtvNXg2UVV1bTcwcXc4RDlxUGFQNVh0K0hBMWJqMgp0MUUwMHZBdWFja0NnWUJXUkZtQmdRdG9mM0s5V3FKRUhiZ2t1bTFlYlc4MWQybElKZlJHZW81aVJvZmlDRGhKCm9lYWhTQW1lT2RJUmZldk9LNGI4YXRpaTdYNUJ2TmI2MXdrcGFrZGRHZTJ6TGRTVnFFS00yV0J1VHh4S0RUTEYKMElENjVpQ3NoTVUwWllCM0MxbElENVdKYUJkS2pnNXJ4TGRxMi8veWsvZUNzc0hEb0UyUUlqQTNTUUtCZ0UvUQpZNlhodlI4OVZlcERQdzVPV0dyRmR2Z0lnRTh6K0FNTkY5akZwYXdsWnM1bFlFV1R2aTQ4WnRMK0VhSldKY2ZCCjV0emVPMTA1OTBGSEtxTWt2bHpDS0FxRCtYTTNZZkpMMmNHampSbER6NWtwSHNhVmxCUnE0SXJqcG9lYXRvSG8KemNmT3gwYlczVDFGOFhNS3JLMVN5YUtVZDZkYThXTEhSK290K2RQWkFvR0JBT0hhejJLSVRnNXQvakFIMFpMNgpwUk5JZjljOTlFWi8xMTJBaUVXZ0NqU0c3MTBQNDgzTzZoL0NDaDM0ckZURWJ4Y0lHUXErbEx4TDFDTzAvV0d3CkFIYjF6eVB4T25yQnZPK1h2Y1VLVFppR3RkWXdzaDVmNXAzb2h5TmFoZ2lEcFNoOGx3ai9xWWRxR0ZLMzNDUVUKMytLR0JWQ1N5T0lXbjJCRlNzVzJaRU03Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K"
        }, 
        "kind": "Secret", 
        "metadata": {
            "creationTimestamp": null, 
            "name": "logging-curator"
        }, 
        "type": "Opaque"
    }, 
    "state": "present"
}
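
The ca, cert and key entries above are base64-encoded PEM blobs produced by the certificate tasks earlier in the logging play. A hedged sketch of checking what the curator client certificate actually contains, should TLS to Elasticsearch fail later:

    oc get secret logging-curator -n logging -o jsonpath='{.data.cert}' \
        | base64 -d | openssl x509 -noout -subject -enddate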

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:75
ok: [openshift] => {
    "ansible_facts": {
        "curator_component": "curator-ops", 
        "curator_name": "logging-curator-ops"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Generate Curator deploymentconfig] ***********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "b115412ceb667a0cde6419c2310f58b57c29a3bb", 
    "dest": "/tmp/openshift-logging-ansible-xJvK8N/templates/curator-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "d59a69b1ef0a13345ba5d0550a186b54", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 2365, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026598.46-210791982287167/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : Set Curator DC] ******************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:99
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-curator-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:19Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "curator-ops", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-curator-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1870", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-curator-ops", 
                    "uid": "be4ea6ed-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "curator-ops", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "curator-ops", 
                                "logging-infra": "curator", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-curator-ops"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/curator/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/curator/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/curator/keys/ca"
                                        }, 
                                        {
                                            "name": "CURATOR_DEFAULT_DAYS", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_HOUR", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_MINUTE", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_TIMEZONE", 
                                            "value": "UTC"
                                        }, 
                                        {
                                            "name": "CURATOR_SCRIPT_LOG_LEVEL", 
                                            "value": "INFO"
                                        }, 
                                        {
                                            "name": "CURATOR_LOG_LEVEL", 
                                            "value": "ERROR"
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-curator:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "curator", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/curator/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/curator/settings", 
                                            "name": "config", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-curator", 
                            "serviceAccountName": "aggregated-logging-curator", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-curator"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-curator"
                                    }, 
                                    "name": "config"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T16:43:19Z", 
                            "lastUpdateTime": "2017-06-09T16:43:19Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T16:43:19Z", 
                            "lastUpdateTime": "2017-06-09T16:43:19Z", 
                            "message": "replication controller \"logging-curator-ops-1\" is waiting for pod \"logging-curator-ops-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
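
At the moment oc get ran, the logging-curator-ops deploymentconfig still reported availableReplicas 0 and a Progressing condition of Unknown: the first rollout was waiting for its deployer pod. A hedged sketch of following the rollout once the play has finished:

    oc rollout status dc/logging-curator-ops -n logging
    oc get pods -n logging -l component=curator-ops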

TASK [openshift_logging_curator : Delete temp directory] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:109
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-xJvK8N", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:226
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:241
statically included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:2
 [WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_fluentd_nodeselector.keys()
| count }} > 1
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}
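
The [WARNING] above is Ansible noting that the role wraps its when: condition in {{ }}; conditionals are already evaluated as Jinja2 expressions, so the delimiters are redundant. The task itself is not shown in this log, but the bare equivalent would presumably read when: openshift_logging_fluentd_nodeselector.keys() | count > 1. The warning is harmless here, and the condition still evaluated to False, which is why the task was skipped.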

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:6
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:10
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:14
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "fluentd_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:20
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:26
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Create temp directory for doing work in] *****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:33
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002661", 
    "end": "2017-06-09 12:43:24.276723", 
    "rc": 0, 
    "start": "2017-06-09 12:43:24.274062"
}

STDOUT:

/tmp/openshift-logging-ansible-koJpuM

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:38
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-koJpuM"
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : Create templates subdirectory] ***************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-koJpuM/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 40, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:51
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:59
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-fluentd-dockercfg-mvsxr"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:25Z", 
                    "name": "aggregated-logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1915", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-fluentd", 
                    "uid": "c1cc1035-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-fluentd-dockercfg-mvsxr"
                    }, 
                    {
                        "name": "aggregated-logging-fluentd-token-llrks"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
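
Unlike the curator service account, aggregated-logging-fluentd did not exist yet, so this task reports changed, and the cluster has already attached a dockercfg pull secret and an API token to it (both listed in the JSON above). A hedged sketch of inspecting those attachments directly:

    oc describe sa aggregated-logging-fluentd -n logging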

TASK [openshift_logging_fluentd : Set privileged permissions for Fluentd] ******
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:68
changed: [openshift] => {
    "changed": true, 
    "present": "present", 
    "results": {
        "cmd": "/bin/oc adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd -n logging", 
        "results": "", 
        "returncode": 0
    }
}

TASK [openshift_logging_fluentd : Set cluster-reader permissions for Fluentd] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:77
changed: [openshift] => {
    "changed": true, 
    "present": "present", 
    "results": {
        "cmd": "/bin/oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd -n logging", 
        "results": "", 
        "returncode": 0
    }
}
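
Both grants are made with oc adm policy: the privileged SCC lets the fluentd pods mount host paths such as /var/log and /run/log/journal, and cluster-reader lets them read pod and namespace metadata from the API when annotating records. A hedged sketch of auditing or reverting the grants after a test run (these commands are not part of this job):

    oc get scc privileged -o yaml | grep aggregated-logging-fluentd
    oc adm policy remove-scc-from-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
    oc adm policy remove-cluster-role-from-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd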

TASK [openshift_logging_fluentd : template] ************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:86
ok: [openshift] => {
    "changed": false, 
    "checksum": "a8c8596f5fc2c5dd7c8d33d244af17a2555be086", 
    "dest": "/tmp/openshift-logging-ansible-koJpuM/fluent.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "579698b48ffce6276ee0e8d5ac71a338", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 1301, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026607.3-113217011595320/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:94
ok: [openshift] => {
    "changed": false, 
    "checksum": "b3e75eddc4a0765edc77da092384c0c6f95440e1", 
    "dest": "/tmp/openshift-logging-ansible-koJpuM/fluentd-throttle-config.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "25871b8e0a9bedc166a6029872a6c336", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 133, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026607.79-219263278943812/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:100
ok: [openshift] => {
    "changed": false, 
    "checksum": "a3aa36da13f3108aa4ad5b98d4866007b44e9798", 
    "dest": "/tmp/openshift-logging-ansible-koJpuM/secure-forward.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "1084b00c427f4fa48dfc66d6ad6555d4", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 563, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026608.22-257531190693816/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:107
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:113
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:119
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Set Fluentd configmap] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:125
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "fluent.conf": "# This file is the fluentd configuration entrypoint. Edit with care.\n\n@include configs.d/openshift/system.conf\n\n# In each section below, pre- and post- includes don't include anything initially;\n# they exist to enable future additions to openshift conf as needed.\n\n## sources\n## ordered so that syslog always runs last...\n@include configs.d/openshift/input-pre-*.conf\n@include configs.d/dynamic/input-docker-*.conf\n@include configs.d/dynamic/input-syslog-*.conf\n@include configs.d/openshift/input-post-*.conf\n##\n\n<label @INGRESS>\n## filters\n  @include configs.d/openshift/filter-pre-*.conf\n  @include configs.d/openshift/filter-retag-journal.conf\n  @include configs.d/openshift/filter-k8s-meta.conf\n  @include configs.d/openshift/filter-kibana-transform.conf\n  @include configs.d/openshift/filter-k8s-flatten-hash.conf\n  @include configs.d/openshift/filter-k8s-record-transform.conf\n  @include configs.d/openshift/filter-syslog-record-transform.conf\n  @include configs.d/openshift/filter-viaq-data-model.conf\n  @include configs.d/openshift/filter-post-*.conf\n##\n\n## matches\n  @include configs.d/openshift/output-pre-*.conf\n  @include configs.d/openshift/output-operations.conf\n  @include configs.d/openshift/output-applications.conf\n  # no post - applications.conf matches everything left\n##\n</label>\n", 
                    "secure-forward.conf": "# @type secure_forward\n\n# self_hostname ${HOSTNAME}\n# shared_key <SECRET_STRING>\n\n# secure yes\n# enable_strict_verification yes\n\n# ca_cert_path /etc/fluent/keys/your_ca_cert\n# ca_private_key_path /etc/fluent/keys/your_private_key\n  # for private CA secret key\n# ca_private_key_passphrase passphrase\n\n# <server>\n  # or IP\n#   host server.fqdn.example.com\n#   port 24284\n# </server>\n# <server>\n  # ip address to connect\n#   host 203.0.113.8\n  # specify hostlabel for FQDN verification if ipaddress is used for host\n#   hostlabel server.fqdn.example.com\n# </server>\n", 
                    "throttle-config.yaml": "# Logging example fluentd throttling config file\n\n#example-project:\n#  read_lines_limit: 10\n#\n#.operations:\n#  read_lines_limit: 100\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:29Z", 
                    "name": "logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1929", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-fluentd", 
                    "uid": "c415144b-4d32-11e7-ae30-0e910886c5dc"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
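
The logging-fluentd configmap carries three files: fluent.conf (the @include pipeline the collector loads), secure-forward.conf (a fully commented-out template for forwarding to an external fluentd), and throttle-config.yaml (per-project read_lines_limit settings, also commented out). A hedged sketch of enabling throttling for the placeholder project named in the shipped comments:

    oc edit configmap logging-fluentd -n logging
    # in throttle-config.yaml, uncomment e.g.:
    #   example-project:
    #     read_lines_limit: 10
    # the collectors typically need a restart to pick this up:
    oc delete pods -n logging -l component=fluentd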

TASK [openshift_logging_fluentd : Set logging-fluentd secret] ******************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:137
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-fluentd ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.fluentd.key cert=/etc/origin/logging/system.logging.fluentd.crt -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}
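
The role still calls the older oc secrets new syntax here, packaging the fluentd client CA, key and certificate from /etc/origin/logging into the logging-fluentd secret. On newer clients the same secret would be created with oc create secret generic; a hedged equivalent, not what this run executed:

    oc create secret generic logging-fluentd -n logging \
        --from-file=ca=/etc/origin/logging/ca.crt \
        --from-file=key=/etc/origin/logging/system.logging.fluentd.key \
        --from-file=cert=/etc/origin/logging/system.logging.fluentd.crt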

TASK [openshift_logging_fluentd : Generate logging-fluentd daemonset definition] ***
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:154
ok: [openshift] => {
    "changed": false, 
    "checksum": "dd01ce9daab656e9aa6427e81e9e835bdca09818", 
    "dest": "/tmp/openshift-logging-ansible-koJpuM/templates/logging-fluentd.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "d23a03fd95a910891139005bec4629ba", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 3415, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497026610.2-37796803911658/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : Set logging-fluentd daemonset] ***************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:172
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get daemonset logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "extensions/v1beta1", 
                "kind": "DaemonSet", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T16:43:31Z", 
                    "generation": 1, 
                    "labels": {
                        "component": "fluentd", 
                        "logging-infra": "fluentd", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1932", 
                    "selfLink": "/apis/extensions/v1beta1/namespaces/logging/daemonsets/logging-fluentd", 
                    "uid": "c542447e-4d32-11e7-ae30-0e910886c5dc"
                }, 
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "component": "fluentd", 
                            "provider": "openshift"
                        }
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "fluentd", 
                                "logging-infra": "fluentd", 
                                "provider": "openshift"
                            }, 
                            "name": "fluentd-elasticsearch"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/fluent/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/fluent/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/fluent/keys/ca"
                                        }, 
                                        {
                                            "name": "OPS_HOST", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "OPS_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "OPS_CLIENT_CERT", 
                                            "value": "/etc/fluent/keys/cert"
                                        }, 
                                        {
                                            "name": "OPS_CLIENT_KEY", 
                                            "value": "/etc/fluent/keys/key"
                                        }, 
                                        {
                                            "name": "OPS_CA", 
                                            "value": "/etc/fluent/keys/ca"
                                        }, 
                                        {
                                            "name": "ES_COPY", 
                                            "value": "false"
                                        }, 
                                        {
                                            "name": "USE_JOURNAL", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "JOURNAL_SOURCE"
                                        }, 
                                        {
                                            "name": "JOURNAL_READ_FROM_HEAD", 
                                            "value": "false"
                                        }
                                    ], 
                                    "image": "172.30.197.120:5000/logging/logging-fluentd:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "fluentd-elasticsearch", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m", 
                                            "memory": "512Mi"
                                        }
                                    }, 
                                    "securityContext": {
                                        "privileged": true
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/run/log/journal", 
                                            "name": "runlogjournal"
                                        }, 
                                        {
                                            "mountPath": "/var/log", 
                                            "name": "varlog"
                                        }, 
                                        {
                                            "mountPath": "/var/lib/docker/containers", 
                                            "name": "varlibdockercontainers", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/fluent/configs.d/user", 
                                            "name": "config", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/fluent/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/docker-hostname", 
                                            "name": "dockerhostname", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/localtime", 
                                            "name": "localtime", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/sysconfig/docker", 
                                            "name": "dockercfg", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/docker", 
                                            "name": "dockerdaemoncfg", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "nodeSelector": {
                                "logging-infra-fluentd": "true"
                            }, 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-fluentd", 
                            "serviceAccountName": "aggregated-logging-fluentd", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "hostPath": {
                                        "path": "/run/log/journal"
                                    }, 
                                    "name": "runlogjournal"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/var/log"
                                    }, 
                                    "name": "varlog"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/var/lib/docker/containers"
                                    }, 
                                    "name": "varlibdockercontainers"
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-fluentd"
                                    }, 
                                    "name": "config"
                                }, 
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-fluentd"
                                    }
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/hostname"
                                    }, 
                                    "name": "dockerhostname"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/localtime"
                                    }, 
                                    "name": "localtime"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/sysconfig/docker"
                                    }, 
                                    "name": "dockercfg"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/docker"
                                    }, 
                                    "name": "dockerdaemoncfg"
                                }
                            ]
                        }
                    }, 
                    "templateGeneration": 1, 
                    "updateStrategy": {
                        "rollingUpdate": {
                            "maxUnavailable": 1
                        }, 
                        "type": "RollingUpdate"
                    }
                }, 
                "status": {
                    "currentNumberScheduled": 0, 
                    "desiredNumberScheduled": 0, 
                    "numberMisscheduled": 0, 
                    "numberReady": 0, 
                    "observedGeneration": 1
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
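
Note the daemonset status: desiredNumberScheduled is 0 because the pod template's nodeSelector requires logging-infra-fluentd=true and no node carries that label yet; the role gathers the node list in the next task so the selected hosts can be labeled and fluentd pods scheduled. A hedged sketch of doing the same by hand for the single node this cluster reports (172.18.7.3, listed below):

    oc label node 172.18.7.3 logging-infra-fluentd=true
    oc get pods -n logging -l component=fluentd -o wide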

TASK [openshift_logging_fluentd : Retrieve list of Fluentd hosts] **************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:183
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get node -o json -n default", 
        "results": [
            {
                "apiVersion": "v1", 
                "items": [
                    {
                        "apiVersion": "v1", 
                        "kind": "Node", 
                        "metadata": {
                            "annotations": {
                                "volumes.kubernetes.io/controller-managed-attach-detach": "true"
                            }, 
                            "creationTimestamp": "2017-06-09T16:10:39Z", 
                            "labels": {
                                "beta.kubernetes.io/arch": "amd64", 
                                "beta.kubernetes.io/os": "linux", 
                                "kubernetes.io/hostname": "172.18.7.3"
                            }, 
                            "name": "172.18.7.3", 
                            "namespace": "", 
                            "resourceVersion": "1882", 
                            "selfLink": "/api/v1/nodes/172.18.7.3", 
                            "uid": "2e56a673-4d2e-11e7-ae30-0e910886c5dc"
                        }, 
                        "spec": {
                            "externalID": "172.18.7.3", 
                            "providerID": "aws:////i-0c2f17a3c842910cc"
                        }, 
                        "status": {
                            "addresses": [
                                {
                                    "address": "172.18.7.3", 
                                    "type": "LegacyHostIP"
                                }, 
                                {
                                    "address": "172.18.7.3", 
                                    "type": "InternalIP"
                                }, 
                                {
                                    "address": "172.18.7.3", 
                                    "type": "Hostname"
                                }
                            ], 
                            "allocatable": {
                                "cpu": "4", 
                                "memory": "15902404Ki", 
                                "pods": "40"
                            }, 
                            "capacity": {
                                "cpu": "4", 
                                "memory": "16004804Ki", 
                                "pods": "40"
                            }, 
                            "conditions": [
                                {
                                    "lastHeartbeatTime": "2017-06-09T16:43:22Z", 
                                    "lastTransitionTime": "2017-06-09T16:10:39Z", 
                                    "message": "kubelet has sufficient disk space available", 
                                    "reason": "KubeletHasSufficientDisk", 
                                    "status": "False", 
                                    "type": "OutOfDisk"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T16:43:22Z", 
                                    "lastTransitionTime": "2017-06-09T16:10:39Z", 
                                    "message": "kubelet has sufficient memory available", 
                                    "reason": "KubeletHasSufficientMemory", 
                                    "status": "False", 
                                    "type": "MemoryPressure"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T16:43:22Z", 
                                    "lastTransitionTime": "2017-06-09T16:10:39Z", 
                                    "message": "kubelet has no disk pressure", 
                                    "reason": "KubeletHasNoDiskPressure", 
                                    "status": "False", 
                                    "type": "DiskPressure"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T16:43:22Z", 
                                    "lastTransitionTime": "2017-06-09T16:10:49Z", 
                                    "message": "kubelet is posting ready status", 
                                    "reason": "KubeletReady", 
                                    "status": "True", 
                                    "type": "Ready"
                                }
                            ], 
                            "daemonEndpoints": {
                                "kubeletEndpoint": {
                                    "Port": 10250
                                }
                            }, 
                            "images": [
                                {
                                    "names": [
                                        "openshift/origin-federation:1565480", 
                                        "openshift/origin-federation:latest"
                                    ], 
                                    "sizeBytes": 1202835711
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-docker-registry@sha256:7934d28a00d728e3841980d772acd0f593426d3c4f731547d4d80917ef371b4a", 
                                        "docker.io/openshift/origin-docker-registry:latest"
                                    ], 
                                    "sizeBytes": 1100571644
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-docker-registry:1565480", 
                                        "openshift/origin-docker-registry:latest"
                                    ], 
                                    "sizeBytes": 1097491120
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-gitserver:1565480", 
                                        "openshift/origin-gitserver:latest"
                                    ], 
                                    "sizeBytes": 1083915063
                                }, 
                                {
                                    "names": [
                                        "openshift/openvswitch:1565480", 
                                        "openshift/openvswitch:latest"
                                    ], 
                                    "sizeBytes": 1051376470
                                }, 
                                {
                                    "names": [
                                        "openshift/node:1565480", 
                                        "openshift/node:latest"
                                    ], 
                                    "sizeBytes": 1049694710
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-keepalived-ipfailover:1565480", 
                                        "openshift/origin-keepalived-ipfailover:latest"
                                    ], 
                                    "sizeBytes": 1026502477
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-haproxy-router:1565480", 
                                        "openshift/origin-haproxy-router:latest"
                                    ], 
                                    "sizeBytes": 1020731460
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-docker-builder:1565480", 
                                        "openshift/origin-docker-builder:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-sti-builder:1565480", 
                                        "openshift/origin-sti-builder:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-f5-router:1565480", 
                                        "openshift/origin-f5-router:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin:1565480", 
                                        "openshift/origin:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-deployer:1565480", 
                                        "openshift/origin-deployer:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-recycler:1565480", 
                                        "openshift/origin-recycler:latest"
                                    ], 
                                    "sizeBytes": 999702306
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-cluster-capacity:1565480", 
                                        "openshift/origin-cluster-capacity:latest"
                                    ], 
                                    "sizeBytes": 960445554
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-release@sha256:611f304562f9fed81fa5348ba39ffc3da008ab55eb8f8b18fdfaa598721958aa", 
                                        "docker.io/openshift/origin-release:golang-1.7"
                                    ], 
                                    "sizeBytes": 852470564
                                }, 
                                {
                                    "names": [
                                        "172.30.197.120:5000/logging/logging-auth-proxy@sha256:5d8be8224babbc44bd0a18b7c119e8b3f7d7ae423badf4e24a4effef3b349075", 
                                        "172.30.197.120:5000/logging/logging-auth-proxy:latest"
                                    ], 
                                    "sizeBytes": 715536009
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-auth-proxy:latest"
                                    ], 
                                    "sizeBytes": 715535365
                                }, 
                                {
                                    "names": [
                                        "docker.io/node@sha256:46db0dd19955beb87b841c30a6b9812ba626473283e84117d1c016deee5949a9", 
                                        "docker.io/node:0.10.36"
                                    ], 
                                    "sizeBytes": 697128386
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-deployer:latest", 
                                        "docker.io/openshift/origin-logging-deployment:latest"
                                    ], 
                                    "sizeBytes": 696821590
                                }, 
                                {
                                    "names": [
                                        "172.30.197.120:5000/logging/logging-kibana@sha256:cb5431f12f16be7067ab4c646954c92f103a0561ad5a6b80f559a0f615cbfe58", 
                                        "172.30.197.120:5000/logging/logging-kibana:latest"
                                    ], 
                                    "sizeBytes": 682851525
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-kibana:latest"
                                    ], 
                                    "sizeBytes": 682851459
                                }, 
                                {
                                    "names": [
                                        "172.30.197.120:5000/logging/logging-elasticsearch@sha256:f71368cec919537d9141b3a26681acbadb1910728a59778445a897ba0536cc1a", 
                                        "172.30.197.120:5000/logging/logging-elasticsearch:latest"
                                    ], 
                                    "sizeBytes": 623513080
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin@sha256:6beee719d8e2555af8d0fc9d3d1bdd623f601417a21948cd4d6146f0cf1d1a46", 
                                        "docker.io/openshift/origin:v1.5.0-alpha.2"
                                    ], 
                                    "sizeBytes": 526224196
                                }, 
                                {
                                    "names": [
                                        "172.30.197.120:5000/logging/logging-fluentd@sha256:dc86ab83eb3868e695a75c9f096e2a2f13bedf92fd9a6792bff6d0e2254d08c5", 
                                        "172.30.197.120:5000/logging/logging-fluentd:latest"
                                    ], 
                                    "sizeBytes": 472184983
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-elasticsearch:latest"
                                    ], 
                                    "sizeBytes": 425567171
                                }, 
                                {
                                    "names": [
                                        "172.30.197.120:5000/logging/logging-curator@sha256:3d7c8bd14b6f1f5f09234345251a6113c350743217bc71b19b4b9a4cb166e8ec", 
                                        "172.30.197.120:5000/logging/logging-curator:latest"
                                    ], 
                                    "sizeBytes": 418288076
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-egress-http-proxy:1565480", 
                                        "openshift/origin-egress-http-proxy:latest"
                                    ], 
                                    "sizeBytes": 396010589
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/base-centos7@sha256:aea292a3bddba020cde0ee83e6a45807931eb607c164ec6a3674f67039d8cd7c", 
                                        "docker.io/openshift/base-centos7:latest"
                                    ], 
                                    "sizeBytes": 383049978
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-egress-router:1565480", 
                                        "openshift/origin-egress-router:latest"
                                    ], 
                                    "sizeBytes": 364746296
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-base:latest"
                                    ], 
                                    "sizeBytes": 363070636
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-fluentd:latest"
                                    ], 
                                    "sizeBytes": 359225972
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-curator:latest"
                                    ], 
                                    "sizeBytes": 224977240
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-pod:1565480", 
                                        "openshift/origin-pod:latest"
                                    ], 
                                    "sizeBytes": 213198358
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-source:latest"
                                    ], 
                                    "sizeBytes": 192548895
                                }, 
                                {
                                    "names": [
                                        "docker.io/centos@sha256:aebf12af704307dfa0079b3babdca8d7e8ff6564696882bcb5d11f1d461f9ee9", 
                                        "docker.io/centos:7", 
                                        "docker.io/centos:centos7"
                                    ], 
                                    "sizeBytes": 192548537
                                }, 
                                {
                                    "names": [
                                        "openshift/hello-openshift:1565480", 
                                        "openshift/hello-openshift:latest"
                                    ], 
                                    "sizeBytes": 5635113
                                }
                            ], 
                            "nodeInfo": {
                                "architecture": "amd64", 
                                "bootID": "e2c9c401-80c3-4618-89c1-eb89e7ba797a", 
                                "containerRuntimeVersion": "docker://1.12.6", 
                                "kernelVersion": "3.10.0-514.21.1.el7.x86_64", 
                                "kubeProxyVersion": "v1.6.1+5115d708d7", 
                                "kubeletVersion": "v1.6.1+5115d708d7", 
                                "machineID": "f9370ed252a14f73b014c1301a9b6d1b", 
                                "operatingSystem": "linux", 
                                "osImage": "Red Hat Enterprise Linux Server 7.3 (Maipo)", 
                                "systemUUID": "EC2BF994-37D2-F763-F920-620715B7E844"
                            }
                        }
                    }
                ], 
                "kind": "List", 
                "metadata": {}, 
                "resourceVersion": "", 
                "selfLink": ""
            }
        ], 
        "returncode": 0
    }, 
    "state": "list"
}

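For reference, the task above enumerates cluster nodes with "/bin/oc get node -o json" and the role derives the Fluentd host list from the returned node names. A minimal sketch of the same lookup done by hand, assuming a kubeconfig pointing at this cluster:

    # List the node names that the role will label for Fluentd.
    oc get nodes -o jsonpath='{.items[*].metadata.name}'
    # For this run the only node is 172.18.7.3.
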
TASK [openshift_logging_fluentd : Set openshift_logging_fluentd_hosts] *********
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:190
ok: [openshift] => {
    "ansible_facts": {
        "openshift_logging_fluentd_hosts": [
            "172.18.7.3"
        ]
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : include] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:195
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml for openshift

TASK [openshift_logging_fluentd : Label 172.18.7.3 for Fluentd deployment] *****
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:2
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc label node 172.18.7.3 logging-infra-fluentd=true --overwrite", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "add"
}

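For reference, the label applied above is what makes the node match the logging-fluentd DaemonSet's node selector (logging-infra-fluentd=true). A minimal sketch of applying and then verifying that label by hand, assuming the same node name:

    # Apply (or re-apply) the Fluentd scheduling label, then confirm it is present.
    oc label node 172.18.7.3 logging-infra-fluentd=true --overwrite
    oc get node 172.18.7.3 --show-labels
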
TASK [openshift_logging_fluentd : command] *************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:10
changed: [openshift -> 127.0.0.1] => {
    "changed": true, 
    "cmd": [
        "sleep", 
        "0.5"
    ], 
    "delta": "0:00:00.502467", 
    "end": "2017-06-09 12:43:34.389412", 
    "rc": 0, 
    "start": "2017-06-09 12:43:33.886945"
}

TASK [openshift_logging_fluentd : Delete temp directory] ***********************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:202
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-koJpuM", 
    "state": "absent"
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:253
included: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/update_master_config.yaml for openshift

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:36
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Cleaning up local temp dir] **************************
task path: /tmp/tmp.YFsh2WnHw8/openhift-ansible/roles/openshift_logging/tasks/main.yaml:40
ok: [openshift -> 127.0.0.1] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-JLU047", 
    "state": "absent"
}
META: ran handlers
META: ran handlers

PLAY [Update Master configs] ***************************************************
skipping: no hosts matched

PLAY RECAP *********************************************************************
localhost                  : ok=2    changed=0    unreachable=0    failed=0   
openshift                  : ok=213  changed=71   unreachable=0    failed=0   

/data/src/github.com/openshift/origin-aggregated-logging
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.267s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                                      READY     STATUS    RESTARTS   AGE
logging-es-data-master-oawpjydu-1-f676p   1/1       Running   0          1m

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.257s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                     READY     STATUS    RESTARTS   AGE
logging-kibana-1-nxzg9   2/2       Running   0          43s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.317s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                      READY     STATUS    RESTARTS   AGE
logging-curator-1-gw294   1/1       Running   0          21s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.251s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                                          READY     STATUS    RESTARTS   AGE
logging-es-ops-data-master-n5j3wlaj-1-h1fvk   1/1       Running   0          1m

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.303s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                         READY     STATUS    RESTARTS   AGE
logging-kibana-ops-1-3jqc8   2/2       Running   0          30s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.263s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                          READY     STATUS    RESTARTS   AGE
logging-curator-ops-1-s67ss   1/1       Running   0          15s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.259s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing /data/src/github.com/openshift/origin-aggregated-logging
--> Deploying template "logging/logging-fluentd-template-maker" for "-" to project logging

     logging-fluentd-template-maker
     ---------
     Template to create template for fluentd

     * With parameters:
        * MASTER_URL=https://kubernetes.default.svc.cluster.local
        * ES_HOST=logging-es
        * ES_PORT=9200
        * ES_CLIENT_CERT=/etc/fluent/keys/cert
        * ES_CLIENT_KEY=/etc/fluent/keys/key
        * ES_CA=/etc/fluent/keys/ca
        * OPS_HOST=logging-es-ops
        * OPS_PORT=9200
        * OPS_CLIENT_CERT=/etc/fluent/keys/cert
        * OPS_CLIENT_KEY=/etc/fluent/keys/key
        * OPS_CA=/etc/fluent/keys/ca
        * ES_COPY=false
        * ES_COPY_HOST=
        * ES_COPY_PORT=
        * ES_COPY_SCHEME=https
        * ES_COPY_CLIENT_CERT=
        * ES_COPY_CLIENT_KEY=
        * ES_COPY_CA=
        * ES_COPY_USERNAME=
        * ES_COPY_PASSWORD=
        * OPS_COPY_HOST=
        * OPS_COPY_PORT=
        * OPS_COPY_SCHEME=https
        * OPS_COPY_CLIENT_CERT=
        * OPS_COPY_CLIENT_KEY=
        * OPS_COPY_CA=
        * OPS_COPY_USERNAME=
        * OPS_COPY_PASSWORD=
        * IMAGE_PREFIX_DEFAULT=172.30.197.120:5000/logging/
        * IMAGE_VERSION_DEFAULT=latest
        * USE_JOURNAL=
        * JOURNAL_SOURCE=
        * JOURNAL_READ_FROM_HEAD=false
        * USE_MUX=false
        * USE_MUX_CLIENT=false
        * MUX_ALLOW_EXTERNAL=false
        * BUFFER_QUEUE_LIMIT=1024
        * BUFFER_SIZE_LIMIT=16777216

--> Creating resources ...
    template "logging-fluentd-template" created
--> Success
    Run 'oc status' to view your app.
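
As a rough sketch: the step above processes the "logging-fluentd-template-maker" template with the parameters listed and creates a concrete "logging-fluentd-template" object. Instantiating that generated template by hand could look like the following, assuming its defaults are acceptable and the template lives in the current project:

    # Render the generated Fluentd template and create the objects it defines.
    oc process logging-fluentd-template | oc create -f -
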
START wait_for_fluentd_to_catch_up at 2017-06-09 16:43:51.332336876+00:00
added es message 822bfa50-c8df-4811-a55f-10753e1c5ff4
added es-ops message a978f988-116f-40ea-a418-3bd0514c1b17
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 822bfa50-c8df-4811-a55f-10753e1c5ff4
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for a978f988-116f-40ea-a418-3bd0514c1b17
END wait_for_fluentd_to_catch_up took 11 seconds at 2017-06-09 16:44:02.506830461+00:00
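
For illustration, wait_for_fluentd_to_catch_up writes one uniquely tagged message for the project index and one for the .operations index, then polls Elasticsearch until each marker is found. A rough sketch of the kind of query it repeats, using the project marker from this run; the certificate paths are an assumption based on the logging deployment's secret mount:

    # Count documents carrying the es marker in the project.logging indices.
    # (Cert paths below are assumed, not taken from this log.)
    oc exec logging-es-data-master-oawpjydu-1-f676p -- curl -s \
        --cacert /etc/elasticsearch/secret/admin-ca \
        --cert /etc/elasticsearch/secret/admin-cert \
        --key /etc/elasticsearch/secret/admin-key \
        "https://localhost:9200/project.logging.*/_count?q=message:822bfa50-c8df-4811-a55f-10753e1c5ff4"
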
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.262s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You don't have any projects. You can try to create a new project, by running

    oc new-project <projectname>


There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.299s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.7.3:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    logging
    openshift
    openshift-infra

Using project "default".

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success...
SUCCESS after 0.310s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success
Standard output from the command:
cluster role "cluster-admin" added: "admin"

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success...
SUCCESS after 0.273s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success
Standard output from the command:
Login successful.

You don't have any projects. You can try to create a new project, by running

    oc new-project <projectname>


There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.263s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.7.3:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    logging
    openshift
    openshift-infra

Using project "default".

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.256s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success...
SUCCESS after 0.261s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success
Standard output from the command:
role "view" added: "loguser"

There was no error output from the command.
Checking if Elasticsearch logging-es-data-master-oawpjydu-1-f676p is ready
{
    "_id": "0",
    "_index": ".searchguard.logging-es-data-master-oawpjydu-1-f676p",
    "_shards": {
        "failed": 0,
        "successful": 1,
        "total": 1
    },
    "_type": "rolesmapping",
    "_version": 2,
    "created": false
}
Checking if Elasticsearch logging-es-ops-data-master-n5j3wlaj-1-h1fvk is ready
{
    "_id": "0",
    "_index": ".searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk",
    "_shards": {
        "failed": 0,
        "successful": 1,
        "total": 1
    },
    "_type": "rolesmapping",
    "_version": 2,
    "created": false
}
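
For reference, the readiness check above seeds the per-pod ".searchguard.<pod>" index (the rolesmapping document shown) before any tests query Elasticsearch. A minimal sketch of probing that index directly, under the same assumed cert mount paths as the sketch above:

    # Confirm the SearchGuard config index for this pod answers queries.
    pod=logging-es-data-master-oawpjydu-1-f676p
    oc exec "${pod}" -- curl -s \
        --cacert /etc/elasticsearch/secret/admin-ca \
        --cert /etc/elasticsearch/secret/admin-cert \
        --key /etc/elasticsearch/secret/admin-key \
        "https://localhost:9200/.searchguard.${pod}/_search?size=0"
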
------------------------------------------
     Test 'admin' user can access cluster stats
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success...
SUCCESS after 0.012s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
------------------------------------------
     Test 'admin' user can access cluster stats for OPS cluster
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success...
SUCCESS after 0.013s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
Running e2e tests
Checking installation of the EFK stack...
Running test/cluster/rollout.sh:20: executing 'oc project logging' expecting success...
SUCCESS after 0.295s: test/cluster/rollout.sh:20: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.7.3:8443".

There was no error output from the command.
[INFO] Checking for DeploymentConfigurations...
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success...
SUCCESS after 0.303s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success
Standard output from the command:
NAME             REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-kibana   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success...
SUCCESS after 0.278s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success
Standard output from the command:
replication controller "logging-kibana-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success...
SUCCESS after 0.268s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success
Standard output from the command:
NAME              REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-curator   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success...
SUCCESS after 0.245s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success
Standard output from the command:
replication controller "logging-curator-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success...
SUCCESS after 0.255s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success
Standard output from the command:
NAME                 REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-kibana-ops   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success...
SUCCESS after 0.311s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success
Standard output from the command:
replication controller "logging-kibana-ops-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success...
SUCCESS after 0.244s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success
Standard output from the command:
NAME                  REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-curator-ops   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success...
SUCCESS after 0.243s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success
Standard output from the command:
replication controller "logging-curator-ops-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-oawpjydu' expecting success...
SUCCESS after 0.266s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-oawpjydu' expecting success
Standard output from the command:
NAME                              REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-es-data-master-oawpjydu   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-oawpjydu' expecting success...
SUCCESS after 0.278s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-oawpjydu' expecting success
Standard output from the command:
replication controller "logging-es-data-master-oawpjydu-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-n5j3wlaj' expecting success...
SUCCESS after 0.280s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-n5j3wlaj' expecting success
Standard output from the command:
NAME                                  REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-es-ops-data-master-n5j3wlaj   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-n5j3wlaj' expecting success...
SUCCESS after 0.253s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-n5j3wlaj' expecting success
Standard output from the command:
replication controller "logging-es-ops-data-master-n5j3wlaj-1" successfully rolled out

There was no error output from the command.
[INFO] Checking for Routes...
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success...
SUCCESS after 0.287s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success
Standard output from the command:
NAME             HOST/PORT                                 PATH      SERVICES         PORT      TERMINATION          WILDCARD
logging-kibana   kibana.router.default.svc.cluster.local             logging-kibana   <all>     reencrypt/Redirect   None

There was no error output from the command.
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success...
SUCCESS after 0.246s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success
Standard output from the command:
NAME                 HOST/PORT                                     PATH      SERVICES             PORT      TERMINATION          WILDCARD
logging-kibana-ops   kibana-ops.router.default.svc.cluster.local             logging-kibana-ops   <all>     reencrypt/Redirect   None

There was no error output from the command.
[INFO] Checking for Services...
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success...
SUCCESS after 0.262s: test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success
Standard output from the command:
NAME         CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
logging-es   172.30.93.153   <none>        9200/TCP   2m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success...
SUCCESS after 0.245s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success
Standard output from the command:
NAME                 CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
logging-es-cluster   172.30.11.253   <none>        9300/TCP   2m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success...
SUCCESS after 0.253s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success
Standard output from the command:
NAME             CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
logging-kibana   172.30.209.208   <none>        443/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success...
SUCCESS after 0.297s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success
Standard output from the command:
NAME             CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
logging-es-ops   172.30.217.237   <none>        9200/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success...
SUCCESS after 0.259s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success
Standard output from the command:
NAME                     CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
logging-es-ops-cluster   172.30.230.244   <none>        9300/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success...
SUCCESS after 0.244s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success
Standard output from the command:
NAME                 CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
logging-kibana-ops   172.30.193.185   <none>        443/TCP   1m

There was no error output from the command.
[INFO] Checking for OAuthClients...
Running test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success...
SUCCESS after 0.247s: test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success
Standard output from the command:
NAME           SECRET                                                             WWW-CHALLENGE   REDIRECT URIS
kibana-proxy   AohHiVm6HSxFBdszf0bVUicNkedhabU3ogYkY7MoZZLAa182qxevjkyB4r8uegVl   FALSE           https://kibana.router.default.svc.cluster.local,https://kibana-ops.router.default.svc.cluster.local

There was no error output from the command.
[INFO] Checking for DaemonSets...
Running test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success...
SUCCESS after 0.246s: test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success
Standard output from the command:
NAME              DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE-SELECTOR                AGE
logging-fluentd   1         1         1         1            1           logging-infra-fluentd=true   58s

There was no error output from the command.
Running test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.283s: test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
1
There was no error output from the command.
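
For reference, the DaemonSet check above polls ".status.numberReady" until it reports one ready Fluentd pod per labeled node. A minimal sketch of the same wait loop, assuming a bash shell with oc on the PATH:

    # Poll until the logging-fluentd DaemonSet reports at least one ready pod.
    until [ "$(oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }')" = "1" ]; do
        sleep 0.2
    done
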
Checking for log entry matches between ES and their sources...
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.265s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.307s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.7.3:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.277s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.7.3:8443".

There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-1-nxzg9 for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-nxzg9 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.353s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-nxzg9 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-nxzg9 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.266s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-nxzg9 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-nxzg9 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.268s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-nxzg9 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Testing Elasticsearch pod logging-es-data-master-oawpjydu-1-f676p for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.430s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-oawpjydu-1-f676p -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.246s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-oawpjydu-1-f676p -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-data-master-oawpjydu-1-f676p recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.422s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es","master_node":"A-XZ1AqTR8KI5MOCBCbijA"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-data-master-oawpjydu-1-f676p is the master
[INFO] Checking that Elasticsearch pod logging-es-data-master-oawpjydu-1-f676p has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.460s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997                
project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.2017.06.09 
project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.2017.06.09 
.searchguard.logging-es-data-master-oawpjydu-1-f676p            
.kibana                                                         

There was no error output from the command.
[INFO] Checking for index project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc with Kibana pod logging-kibana-1-nxzg9...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-nxzg9' 'logging-es:9200' 'project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc' '/var/log/containers/*_30bbfebb-4d2e-11e7-ae30-0e910886c5dc_*.log' '500' 'admin' 'wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' '127.0.0.1'' expecting success...
SUCCESS after 10.229s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-nxzg9' 'logging-es:9200' 'project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc' '/var/log/containers/*_30bbfebb-4d2e-11e7-ae30-0e910886c5dc_*.log' '500' 'admin' 'wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-nxzg9 -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.*/_search?q=hostname:ip-172-18-7-3&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc

There was no error output from the command.
[INFO] Checking for index project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc with Kibana pod logging-kibana-1-nxzg9...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-nxzg9' 'logging-es:9200' 'project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc' '/var/log/containers/*_2bbde7ca-4d2e-11e7-ae30-0e910886c5dc_*.log' '500' 'admin' 'wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' '127.0.0.1'' expecting success...
SUCCESS after 0.704s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-nxzg9' 'logging-es:9200' 'project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc' '/var/log/containers/*_2bbde7ca-4d2e-11e7-ae30-0e910886c5dc_*.log' '500' 'admin' 'wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-nxzg9 -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer wllrYiCuvEkCwGlhV4F8nEaOuYBTHoMmBLNvEn5wIEE' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.*/_search?q=hostname:ip-172-18-7-3&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc

There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-data-master-oawpjydu-1-f676p contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-oawpjydu-1-f676p -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.337s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-oawpjydu-1-f676p -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
org.ovirt.viaq-collectd.template.json

There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.437s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.427s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.424s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-oawpjydu-1-f676p' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.271s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.299s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.7.3:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.285s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.7.3:8443".

There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-ops-1-3jqc8 for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-3jqc8 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.330s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-3jqc8 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-3jqc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.308s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-3jqc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-3jqc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.272s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-3jqc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
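[NOTE] The two readiness probes above follow the same poll-until-true pattern; a minimal sketch using the jsonpath from the traced commands (pod name from this run, timeout handling omitted):
  pod=logging-kibana-ops-1-3jqc8
  until [ "$(oc get pod "$pod" -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }')" = "true" ]; do
    sleep 0.2
  done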
[INFO] Testing Elasticsearch pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.450s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.248s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.442s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es-ops","master_node":"wBEK-BFER4ufQ_mBrx4OWA"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk is the master
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.503s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997         
.searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk 
.operations.2017.06.09                                   
.kibana                                                  

There was no error output from the command.
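[NOTE] The "persisted indices" check boils down to listing index names and matching the project./.operations. prefixes; an approximation built from the curl_es expansion seen later in this log (pod name from this run):
  oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure \
      --key /etc/elasticsearch/secret/admin-key \
      --cert /etc/elasticsearch/secret/admin-cert \
      'https://localhost:9200/_cat/indices?h=index' \
    | grep -E '^(project|\.operations)\.'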
[INFO] Checking for index .operations with Kibana pod logging-kibana-ops-1-3jqc8...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-3jqc8' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'PNrSnVXbqvQ1pYW6_nRhqArqjIMWqh0pJ-IrPG0M4_c' '127.0.0.1'' expecting success...
SUCCESS after 0.863s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-3jqc8' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'PNrSnVXbqvQ1pYW6_nRhqArqjIMWqh0pJ-IrPG0M4_c' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-ops-1-3jqc8 -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer PNrSnVXbqvQ1pYW6_nRhqArqjIMWqh0pJ-IrPG0M4_c' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es-ops:9200/.operations.*/_search?q=hostname:ip-172-18-7-3&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es-ops:9200 for index .operations

There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-n5j3wlaj-1-h1fvk contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.347s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
org.ovirt.viaq-collectd.template.json

There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.425s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.450s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.426s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-n5j3wlaj-1-h1fvk' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
running test test-curator.sh
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
current indices before 1st deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.curatortest.2017.03.31
.operations.curatortest.2017.06.09
.searchguard.logging-es-data-master-oawpjydu-1-f676p
default-index.curatortest.2017.05.09
default-index.curatortest.2017.06.09
project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.project-dev.curatortest.2017.06.08
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.05.12
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.02
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.02
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.02
project.project3-qe.curatortest.2017.06.09
Fri Jun  9 16:50:57 UTC 2017
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
current indices after 1st deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.curatortest.2017.06.09
.searchguard.logging-es-data-master-oawpjydu-1-f676p
default-index.curatortest.2017.06.09
project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.09
good - index project.project-dev.curatortest.2017.06.09 is present
good - index project.project-dev.curatortest.2017.06.08 is missing
good - index project.project-qe.curatortest.2017.06.09 is present
good - index project.project-qe.curatortest.2017.06.02 is missing
good - index project.project-prod.curatortest.2017.06.09 is present
good - index project.project-prod.curatortest.2017.05.12 is missing
good - index .operations.curatortest.2017.06.09 is present
good - index .operations.curatortest.2017.03.31 is missing
good - index default-index.curatortest.2017.06.09 is present
good - index default-index.curatortest.2017.05.09 is missing
good - index project.project2-qe.curatortest.2017.06.09 is present
good - index project.project2-qe.curatortest.2017.06.02 is missing
good - index project.project3-qe.curatortest.2017.06.09 is present
good - index project.project3-qe.curatortest.2017.06.02 is missing
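[NOTE] The "good - index ... is present/missing" assertions above have roughly this shape; check_index is a hypothetical helper written for illustration, not the test's actual code (pod and index names are from this run):
  es_pod=logging-es-data-master-oawpjydu-1-f676p
  indices=$(oc exec "$es_pod" -- curl --silent --insecure \
              --key /etc/elasticsearch/secret/admin-key \
              --cert /etc/elasticsearch/secret/admin-cert \
              'https://localhost:9200/_cat/indices?h=index')
  check_index() {   # $1 = index name, $2 = expected state: present or missing
    if echo "$indices" | awk '{print $1}' | grep -Fqx "$1"; then found=present; else found=missing; fi
    [ "$found" = "$2" ] && echo "good - index $1 is $2" || echo "fail - index $1 is $found"
  }
  check_index project.project-dev.curatortest.2017.06.09 present
  check_index project.project-dev.curatortest.2017.06.08 missing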
current indices before 2nd deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.curatortest.2017.03.31
.operations.curatortest.2017.06.09
.searchguard.logging-es-data-master-oawpjydu-1-f676p
default-index.curatortest.2017.05.09
default-index.curatortest.2017.06.09
project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.project-dev.curatortest.2017.06.08
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.05.12
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.02
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.02
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.02
project.project3-qe.curatortest.2017.06.09
sleeping 218 seconds to see if runhour and runminute are working . . .
verify indices deletion again
current indices after 2nd deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.curatortest.2017.06.09
.searchguard.logging-es-data-master-oawpjydu-1-f676p
default-index.curatortest.2017.06.09
project.default.2bbde7ca-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.logging.30bbfebb-4d2e-11e7-ae30-0e910886c5dc.2017.06.09
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.09
good - index project.project-dev.curatortest.2017.06.09 is present
good - index project.project-dev.curatortest.2017.06.08 is missing
good - index project.project-qe.curatortest.2017.06.09 is present
good - index project.project-qe.curatortest.2017.06.02 is missing
good - index project.project-prod.curatortest.2017.06.09 is present
good - index project.project-prod.curatortest.2017.05.12 is missing
good - index .operations.curatortest.2017.06.09 is present
good - index .operations.curatortest.2017.03.31 is missing
good - index default-index.curatortest.2017.06.09 is present
good - index default-index.curatortest.2017.05.09 is missing
good - index project.project2-qe.curatortest.2017.06.09 is present
good - index project.project2-qe.curatortest.2017.06.02 is missing
good - index project.project3-qe.curatortest.2017.06.09 is present
good - index project.project3-qe.curatortest.2017.06.02 is missing
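[NOTE] The 218-second sleep above waits for curator's scheduled run: the test appears to set runhour/runminute a few minutes ahead and then sleeps until that wall-clock time. A hypothetical sketch of computing such a delay (the offset is assumed, not taken from the test):
  run_at=$(date -d 'now + 4 minutes' +%s)   # assumed offset; this run reported 218s and, later, 209s
  sleep $(( run_at - $(date +%s) ))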
current indices before 1st deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.2017.06.09
.operations.curatortest.2017.03.31
.operations.curatortest.2017.06.09
.searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk
default-index.curatortest.2017.05.09
default-index.curatortest.2017.06.09
project.project-dev.curatortest.2017.06.08
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.05.12
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.02
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.02
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.02
project.project3-qe.curatortest.2017.06.09
Fri Jun  9 16:56:24 UTC 2017
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator-ops" scaled
deploymentconfig "logging-curator-ops" scaled
current indices after 1st deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.2017.06.09
.operations.curatortest.2017.06.09
.searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk
default-index.curatortest.2017.06.09
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.09
good - index project.project-dev.curatortest.2017.06.09 is present
good - index project.project-dev.curatortest.2017.06.08 is missing
good - index project.project-qe.curatortest.2017.06.09 is present
good - index project.project-qe.curatortest.2017.06.02 is missing
good - index project.project-prod.curatortest.2017.06.09 is present
good - index project.project-prod.curatortest.2017.05.12 is missing
good - index .operations.curatortest.2017.06.09 is present
good - index .operations.curatortest.2017.03.31 is missing
good - index default-index.curatortest.2017.06.09 is present
good - index default-index.curatortest.2017.05.09 is missing
good - index project.project2-qe.curatortest.2017.06.09 is present
good - index project.project2-qe.curatortest.2017.06.02 is missing
good - index project.project3-qe.curatortest.2017.06.09 is present
good - index project.project3-qe.curatortest.2017.06.02 is missing
current indices before 2nd deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.2017.06.09
.operations.curatortest.2017.03.31
.operations.curatortest.2017.06.09
.searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk
default-index.curatortest.2017.05.09
default-index.curatortest.2017.06.09
project.project-dev.curatortest.2017.06.08
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.05.12
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.02
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.02
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.02
project.project3-qe.curatortest.2017.06.09
sleeping 209 seconds to see if runhour and runminute are working . . .
verify indices deletion again
current indices after 2nd deletion are:
.kibana
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.2017.06.09
.operations.curatortest.2017.06.09
.searchguard.logging-es-ops-data-master-n5j3wlaj-1-h1fvk
default-index.curatortest.2017.06.09
project.project-dev.curatortest.2017.06.09
project.project-prod.curatortest.2017.06.09
project.project-qe.curatortest.2017.06.09
project.project2-qe.curatortest.2017.06.09
project.project3-qe.curatortest.2017.06.09
good - index project.project-dev.curatortest.2017.06.09 is present
good - index project.project-dev.curatortest.2017.06.08 is missing
good - index project.project-qe.curatortest.2017.06.09 is present
good - index project.project-qe.curatortest.2017.06.02 is missing
good - index project.project-prod.curatortest.2017.06.09 is present
good - index project.project-prod.curatortest.2017.05.12 is missing
good - index .operations.curatortest.2017.06.09 is present
good - index .operations.curatortest.2017.03.31 is missing
good - index default-index.curatortest.2017.06.09 is present
good - index default-index.curatortest.2017.05.09 is missing
good - index project.project2-qe.curatortest.2017.06.09 is present
good - index project.project2-qe.curatortest.2017.06.02 is missing
good - index project.project3-qe.curatortest.2017.06.09 is present
good - index project.project3-qe.curatortest.2017.06.02 is missing
curator running [5] jobs
curator run finish
curator running [5] jobs
curator run finish
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator-ops" scaled
deploymentconfig "logging-curator-ops" scaled
running test test-datetime-future.sh
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ [[ 1 -ne 1 ]]
++ [[ true = \f\a\l\s\e ]]
++ CLUSTER=true
++ ops=-ops
++ INDEX_PREFIX=
++ ARTIFACT_DIR=/tmp/origin-aggregated-logging/artifacts
++ '[' '!' -d /tmp/origin-aggregated-logging/artifacts ']'
++ get_test_user_token
++ local current_project
+++ oc project -q
++ current_project=logging
++ oc login --username=admin --password=admin
+++ oc whoami -t
++ test_token=l_I-WnDx9XmvH9tiUXH7div3zrVQmlWOCbJ7W5EZd8M
+++ oc whoami
++ test_name=admin
++ test_ip=127.0.0.1
++ oc login --username=system:admin
++ oc project logging
++ TEST_DIVIDER=------------------------------------------
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-0xnjn
+++ date +%z
++ nodetz=-0400
+++ oc exec logging-fluentd-0xnjn -- date +%z
Good - node timezone -0400 EDT is equal to the fluentd pod timezone
++ podtz=-0400
++ '[' x-0400 = x-0400 ']'
+++ date +%Z
++ echo Good - node timezone -0400 EDT is equal to the fluentd pod timezone
++ docker_uses_journal
++ type -p docker
++ grep -q 'Logging Driver: journald'
++ sudo docker info
++ grep -q '^[^#].*"log-driver":' /etc/docker/daemon.json
++ grep -q '^OPTIONS='\''[^'\'']*--log-driver=journald' /etc/sysconfig/docker
++ return 0
The rest of the test is not applicable when using the journal - skipping
++ echo The rest of the test is not applicable when using the journal - skipping
++ exit 0
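[NOTE] A loose sketch of the docker_uses_journal decision traced above: the node counts as journald-based when `docker info` reports the journald logging driver or /etc/sysconfig/docker forces it (the real helper also inspects /etc/docker/daemon.json; the exact precedence is not visible in the trace):
  if sudo docker info 2>/dev/null | grep -q 'Logging Driver: journald'; then
    echo "journald in use - skipping the file-based part of the test"
  elif grep -q '^OPTIONS='\''[^'\'']*--log-driver=journald' /etc/sysconfig/docker 2>/dev/null; then
    echo "journald forced via /etc/sysconfig/docker"
  fi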
running test test-es-copy.sh
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ [[ 1 -ne 1 ]]
++ [[ true = \f\a\l\s\e ]]
++ CLUSTER=true
++ ops=-ops
++ INDEX_PREFIX=
++ PROJ_PREFIX=project.
++ ARTIFACT_DIR=/tmp/origin-aggregated-logging/artifacts
++ '[' '!' -d /tmp/origin-aggregated-logging/artifacts ']'
++ get_test_user_token
++ local current_project
+++ oc project -q
++ current_project=logging
++ oc login --username=admin --password=admin
+++ oc whoami -t
++ test_token=tRFojbih6t2pX-TZjsO94a5MiyCHiuRS4KACxrNh23k
+++ oc whoami
++ test_name=admin
++ test_ip=127.0.0.1
++ oc login --username=system:admin
++ oc project logging
++ TEST_DIVIDER=------------------------------------------
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-0xnjn
+++ mktemp
++ cfg=/tmp/tmp.kaTtaVXymT
++ oc get template logging-fluentd-template -o yaml
++ sed '/- name: ES_COPY/,/value:/ s/value: .*$/value: "false"/'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-0xnjn
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-0xnjn
++ '[' -z logging-fluentd-0xnjn -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-0xnjn
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
pod for component=fluentd not running yet
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-d8w86
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-d8w86 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
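[NOTE] restart_fluentd, reduced to the steps visible in the trace (a sketch, not the helper's exact code; the waits between steps are elided):
  oc delete daemonset logging-fluentd                      # stop the running collector
  # ...wait_for_pod_ACTION stop: poll until the old pod is gone...
  oc process logging-fluentd-template | oc create -f -     # recreate the daemonset from the template
  # ...wait_for_pod_ACTION start: poll until a new pod shows up as Running:
  oc get pods -l component=fluentd | awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'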
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-d8w86
+++ mktemp
++ origconfig=/tmp/tmp.zg2gri58ul
++ oc get template logging-fluentd-template -o yaml
++ write_and_verify_logs 1
++ rc=0
++ wait_for_fluentd_to_catch_up '' '' 1
+++ date +%s
++ local starttime=1497028042
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:07:22.477594629+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:07:22.477594629+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=0da989c2-efc9-42ff-b899-19dad780959b
+++ uuidgen
++ local uuid_es_ops=848682bd-e3e9-496f-b898-07fce186ac23
++ local expected=1
++ local timeout=300
++ add_test_message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/0da989c2-efc9-42ff-b899-19dad780959b
added es message 0da989c2-efc9-42ff-b899-19dad780959b
++ echo added es message 0da989c2-efc9-42ff-b899-19dad780959b
++ logger -i -p local6.info -t 848682bd-e3e9-496f-b898-07fce186ac23 848682bd-e3e9-496f-b898-07fce186ac23
added es-ops message 848682bd-e3e9-496f-b898-07fce186ac23
++ echo added es-ops message 848682bd-e3e9-496f-b898-07fce186ac23
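[NOTE] The two probe records above are injected through different paths (commands and UUIDs are the ones from this run): the kibana-side curl is expected to surface in the project.logging index and the logger call in the .operations index, which is exactly what the polling below checks.
  oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s \
      http://localhost:5601/0da989c2-efc9-42ff-b899-19dad780959b     # logged by the kibana container
  logger -i -p local6.info -t 848682bd-e3e9-496f-b898-07fce186ac23 \
      848682bd-e3e9-496f-b898-07fce186ac23                           # written to syslog/journald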
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=0da989c2-efc9-42ff-b899-19dad780959b
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 0da989c2-efc9-42ff-b899-19dad780959b
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b'
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 0da989c2-efc9-42ff-b899-19dad780959b
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 289 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 0da989c2-efc9-42ff-b899-19dad780959b
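[NOTE] Each polling iteration above is one count query plus a small JSON extraction; shown here as a single pipeline assembled from the traced commands (pod name and UUID from this run; the python one-liner is python2, as on this host):
  oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 \
      --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert \
      'https://localhost:9200/project.logging*/_count?q=message:0da989c2-efc9-42ff-b899-19dad780959b' \
    | python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'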
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=848682bd-e3e9-496f-b898-07fce186ac23
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 848682bd-e3e9-496f-b898-07fce186ac23
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:848682bd-e3e9-496f-b898-07fce186ac23' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:848682bd-e3e9-496f-b898-07fce186ac23'
+++ shift
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:848682bd-e3e9-496f-b898-07fce186ac23'
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 848682bd-e3e9-496f-b898-07fce186ac23
END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:07:40.297647319+00:00
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 848682bd-e3e9-496f-b898-07fce186ac23
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028060
+++ expr 1497028060 - 1497028042
+++ date -u --rfc-3339=ns
++ echo END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:07:40.297647319+00:00
++ return 0
++ '[' 0 -ne 0 ']'
++ return 0
++ trap cleanup INT TERM EXIT
+++ mktemp
++ nocopy=/tmp/tmp.nSNqbeOn4h
++ sed /_COPY/,/value/d /tmp/tmp.zg2gri58ul
+++ mktemp
++ envpatch=/tmp/tmp.7CsfzKY5Mn
++ sed -n '/^        - env:/,/^          image:/ {
/^          image:/d
/^        - env:/d
/name: K8S_HOST_URL/,/value/d
s/ES_/ES_COPY_/
s/OPS_/OPS_COPY_/
p
}' /tmp/tmp.nSNqbeOn4h
++ cat
++ cat /tmp/tmp.nSNqbeOn4h
++ sed '/^        - env:/r /tmp/tmp.7CsfzKY5Mn'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.7CsfzKY5Mn /tmp/tmp.nSNqbeOn4h
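[NOTE] In outline, the sed work above rebuilds the fluentd template with ES_COPY_*/OPS_COPY_* variables (expressions as traced; the here-document appended at the 'cat' step is not shown in the trace, so its content is omitted here):
  # 1. strip any existing *_COPY env entries from the saved template:
  #      sed /_COPY/,/value/d /tmp/tmp.zg2gri58ul > /tmp/tmp.nSNqbeOn4h
  # 2. copy the env block, drop K8S_HOST_URL, and rename ES_*/OPS_* to ES_COPY_*/OPS_COPY_*:
  #      sed -n '/^        - env:/,/^          image:/ {...; s/ES_/ES_COPY_/; s/OPS_/OPS_COPY_/; p}' /tmp/tmp.nSNqbeOn4h > /tmp/tmp.7CsfzKY5Mn
  # 3. splice the copy variables back in after '- env:' and apply:
  #      sed '/^        - env:/r /tmp/tmp.7CsfzKY5Mn' /tmp/tmp.nSNqbeOn4h | oc replace -f -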
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-d8w86
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-d8w86
++ '[' -z logging-fluentd-d8w86 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-d8w86
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
pod for component=fluentd not running yet
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-v9z2k
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-v9z2k ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
START wait_for_fluentd_to_catch_up at 2017-06-09 17:08:02.174011541+00:00
++ fpod=logging-fluentd-v9z2k
++ write_and_verify_logs 2
++ rc=0
++ wait_for_fluentd_to_catch_up '' '' 2
+++ date +%s
++ local starttime=1497028082
+++ date -u --rfc-3339=ns
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:08:02.174011541+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ uuidgen
++ local uuid_es_ops=4319ea28-6173-4bc3-9ce7-648647baa4ef
++ local expected=2
++ local timeout=300
++ add_test_message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/6b3a0a88-542f-47e6-b511-c39329a0dcf4
added es message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
added es-ops message 4319ea28-6173-4bc3-9ce7-648647baa4ef
++ echo added es message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
++ logger -i -p local6.info -t 4319ea28-6173-4bc3-9ce7-648647baa4ef 4319ea28-6173-4bc3-9ce7-648647baa4ef
++ echo added es-ops message 4319ea28-6173-4bc3-9ce7-648647baa4ef
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=6b3a0a88-542f-47e6-b511-c39329a0dcf4
++ expected=2
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ get_count_from_json
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=0
++ test 0 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 6b3a0a88-542f-47e6-b511-c39329a0dcf4
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:6b3a0a88-542f-47e6-b511-c39329a0dcf4'
++ local nrecs=2
++ test 2 = 2
++ break
++ '[' 289 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 2 record project logging for 6b3a0a88-542f-47e6-b511-c39329a0dcf4
++ echo good - wait_for_fluentd_to_catch_up: found 2 record project logging for 6b3a0a88-542f-47e6-b511-c39329a0dcf4
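For reference while reading the repeated expansions above and below: a minimal bash sketch of the three Elasticsearch query helpers as they appear in the xtrace output (curl_es, query_es_from_es, get_count_from_json). The bodies are reconstructed from the trace, not copied from the test suite, so treat the details as approximate.

    # curl_es <pod> <endpoint> [curl options...]: query Elasticsearch from inside the pod
    # over TLS, authenticating with the admin cert mounted at /etc/elasticsearch/secret/.
    curl_es() {
        local pod=$1
        local endpoint=$2
        shift
        shift
        local secret_dir=/etc/elasticsearch/secret/
        # (the trace shows the remaining options collected into an args array; they are
        #  passed straight through here)
        oc exec "${pod}" -- curl --silent --insecure "$@" \
            --key "${secret_dir}admin-key" --cert "${secret_dir}admin-cert" \
            "https://localhost:9200${endpoint}"
    }

    # query_es_from_es <pod> <index-prefix> <op> <field> <value>: build the _count URL.
    query_es_from_es() {
        curl_es "$1" "/$2*/$3?q=$4:$5" --connect-timeout 1
    }

    # get_count_from_json: pull the "count" field out of the _count response on stdin.
    get_count_from_json() {
        python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
    }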
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=4319ea28-6173-4bc3-9ce7-648647baa4ef
++ expected=2
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 4319ea28-6173-4bc3-9ce7-648647baa4ef
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
++ local nrecs=1
++ test 1 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 4319ea28-6173-4bc3-9ce7-648647baa4ef
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
++ local nrecs=1
++ test 1 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 4319ea28-6173-4bc3-9ce7-648647baa4ef
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
++ local nrecs=1
++ test 1 = 2
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 4319ea28-6173-4bc3-9ce7-648647baa4ef
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:4319ea28-6173-4bc3-9ce7-648647baa4ef'
++ local nrecs=2
++ test 2 = 2
++ break
++ '[' 297 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 2 record project .operations for 4319ea28-6173-4bc3-9ce7-648647baa4ef
good - wait_for_fluentd_to_catch_up: found 2 record project .operations for 4319ea28-6173-4bc3-9ce7-648647baa4ef
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028104
+++ expr 1497028104 - 1497028082
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 22 seconds at 2017-06-09 17:08:24.643764878+00:00
++ echo END wait_for_fluentd_to_catch_up took 22 seconds at 2017-06-09 17:08:24.643764878+00:00
++ return 0
++ '[' 0 -ne 0 ']'
++ return 0
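The polling loop that produces the long runs of "test 0 = 2 ... sleep 1" above, sketched from its trace: wait_until_cmd_or_err retries a check once per second until it passes or the timeout expires, and test_count_expected is the check used here, built on the query helpers sketched earlier. Reconstructed from the xtrace output, so details may differ from the real helpers.

    # wait_until_cmd_or_err <test_cmd> <err_cmd> <timeout-seconds>
    wait_until_cmd_or_err() {
        let ii=$3
        local interval=1
        while [ $ii -gt 0 ]; do
            if $1; then
                break
            fi
            sleep $interval
            let ii=ii-$interval
        done
        if [ $ii -le 0 ]; then
            $2          # e.g. test_count_err: dump what was actually found
            return 1
        fi
        return 0
    }

    # test_count_expected: does the index hold exactly $expected records carrying
    # $mymessage in $myfield?  Uses the globals set just before the call
    # ($espod, $myproject, $mymessage, $expected).
    test_count_expected() {
        myfield=${myfield:-message}
        local nrecs=$(query_es_from_es $espod $myproject _count $myfield $mymessage | \
                      get_count_from_json)
        test "$nrecs" = "$expected"
    }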
++ oc replace --force -f /tmp/tmp.zg2gri58ul
template "logging-fluentd-template" deleted
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.zg2gri58ul
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-v9z2k
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-v9z2k
++ '[' -z logging-fluentd-v9z2k -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-v9z2k
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
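The pod start/stop waits in this trace all go through the same helper pair; a simplified sketch of their behavior, reconstructed from the xtrace output (the extra optional-argument checks visible in the trace are omitted here).

    # get_running_pod <component>: name of the Running pod carrying that component label.
    get_running_pod() {
        oc get pods -l component=$1 | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}'
    }

    # wait_for_pod_ACTION <start|stop> <component-or-podname>: poll every 10s, for up to
    # 120s, until a pod for the component is Running (start) or the named pod is gone (stop).
    wait_for_pod_ACTION() {
        local action=$1 what=$2
        local ii=120
        local incr=10
        local curpod
        if [ "$action" = start ] ; then
            curpod=$(get_running_pod "$what")
        else
            curpod=$what
        fi
        while [ $ii -gt 0 ] ; do
            if [ "$action" = stop ] && ! oc describe pod/"$curpod" > /dev/null 2>&1 ; then
                break                                      # pod is gone
            fi
            if [ "$action" = start ] && [ -n "$curpod" ] ; then
                break                                      # pod is up and Running
            fi
            if [ "$action" = start ] ; then
                echo pod for component="$what" not running yet
            else
                echo pod "$curpod" still running
            fi
            sleep $incr
            ii=$(expr $ii - $incr)
            if [ "$action" = start ] ; then
                curpod=$(get_running_pod "$what")          # re-check on the next pass
            fi
        done
        if [ $ii -le 0 ] ; then
            return 1
        fi
        return 0
    }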
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-qhrc7
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-qhrc7 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
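A condensed sketch of the restart just traced: after the edited template goes back in with oc replace, restart_fluentd rebuilds the daemonset from it and waits for the old pod to stop and a new one to come up. Reconstructed from the trace (the old pod name is looked up here rather than carried in a global), so it is an approximation.

    restart_fluentd() {
        local old_pod=$(get_running_pod fluentd)
        oc delete daemonset logging-fluentd                     # tears down the current pod
        wait_for_pod_ACTION stop "$old_pod"                     # wait until it is really gone
        oc process logging-fluentd-template | oc create -f -    # re-create from the template
        wait_for_pod_ACTION start fluentd                       # wait for the new pod
    }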
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-qhrc7
++ write_and_verify_logs 1
++ rc=0
++ wait_for_fluentd_to_catch_up '' '' 1
+++ date +%s
++ local starttime=1497028122
+++ date -u --rfc-3339=ns
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:08:42.547996489+00:00
START wait_for_fluentd_to_catch_up at 2017-06-09 17:08:42.547996489+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ uuidgen
++ local uuid_es_ops=452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ local expected=1
++ local timeout=300
++ add_test_message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/c233e0e9-273d-4afe-b6d0-fc85addbcfcc
added es message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
added es-ops message 452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ echo added es message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
++ logger -i -p local6.info -t 452312a5-5bc3-4fab-9224-ef33d0d1fe11 452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ echo added es-ops message 452312a5-5bc3-4fab-9224-ef33d0d1fe11
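How the two marker records are injected, per the trace: the project-side uuid is sent as an HTTP request to the kibana pod (the request line lands in kibana's container log, which fluentd ships to project.logging), and the operations-side uuid goes to the node's journal via logger (which fluentd ships to .operations). A sketch; add_test_message's body is inferred from its expansion above.

    # Write a uuid into a project log by hitting the kibana container with it.
    add_test_message() {
        local kib_pod=$(get_running_pod kibana)
        oc exec "$kib_pod" -c kibana -- curl --connect-timeout 1 -s "http://localhost:5601/$1"
    }

    # The .operations-side marker goes through the node's journal instead:
    #   logger -i -p local6.info -t "$uuid_es_ops" "$uuid_es_ops"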
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=c233e0e9-273d-4afe-b6d0-fc85addbcfcc
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message c233e0e9-273d-4afe-b6d0-fc85addbcfcc
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:c233e0e9-273d-4afe-b6d0-fc85addbcfcc'
++ local nrecs=1
++ test 1 = 1
++ break
good - wait_for_fluentd_to_catch_up: found 1 record project logging for c233e0e9-273d-4afe-b6d0-fc85addbcfcc
++ '[' 289 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for c233e0e9-273d-4afe-b6d0-fc85addbcfcc
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 452312a5-5bc3-4fab-9224-ef33d0d1fe11
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:452312a5-5bc3-4fab-9224-ef33d0d1fe11' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:452312a5-5bc3-4fab-9224-ef33d0d1fe11'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:452312a5-5bc3-4fab-9224-ef33d0d1fe11'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 452312a5-5bc3-4fab-9224-ef33d0d1fe11
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028140
+++ expr 1497028140 - 1497028122
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:09:00.543549087+00:00
running test test-fluentd-forward.sh
++ echo END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:09:00.543549087+00:00
++ return 0
++ '[' 0 -ne 0 ']'
++ return 0
++ cleanup
++ '[' '!' -f /tmp/tmp.zg2gri58ul ']'
++ return 0
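Stepping back before the next test starts: every wait_for_fluentd_to_catch_up run traced above follows the same shape. A condensed sketch, with the optional extra-check arguments trimmed and assuming the helpers sketched earlier; an approximation of the flow, not the suite's source.

    wait_for_fluentd_to_catch_up() {
        local starttime=$(date +%s)
        echo START wait_for_fluentd_to_catch_up at "$(date -u --rfc-3339=ns)"

        local es_pod=$(get_running_pod es)
        local es_ops_pod=$(get_running_pod es-ops)
        local uuid_es=$(uuidgen)              # marker destined for project.logging
        local uuid_es_ops=$(uuidgen)          # marker destined for .operations
        local expected=${3:-1}
        local timeout=300

        add_test_message "$uuid_es"                                  # via the kibana pod
        logger -i -p local6.info -t "$uuid_es_ops" "$uuid_es_ops"    # via the journal

        # Poll the main cluster for the project.logging record ...
        espod=$es_pod myproject=project.logging mymessage=$uuid_es myfield=message
        wait_until_cmd_or_err test_count_expected test_count_err $timeout

        # ... then the ops cluster for the journal record.
        espod=$es_ops_pod myproject=.operations mymessage=$uuid_es_ops
        myfield=systemd.u.SYSLOG_IDENTIFIER
        wait_until_cmd_or_err test_count_expected test_count_err $timeout

        local endtime=$(date +%s)
        echo END wait_for_fluentd_to_catch_up took $(expr $endtime - $starttime) \
            seconds at "$(date -u --rfc-3339=ns)"
    }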
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ [[ 1 -ne 1 ]]
++ [[ true = \f\a\l\s\e ]]
++ CLUSTER=true
++ ops=-ops
++ ARTIFACT_DIR=/tmp/origin-aggregated-logging/artifacts
++ '[' '!' -d /tmp/origin-aggregated-logging/artifacts ']'
++ PROJ_PREFIX=project.
++ get_test_user_token
++ local current_project
+++ oc project -q
++ current_project=logging
++ oc login --username=admin --password=admin
+++ oc whoami -t
++ test_token=JPWbT41KJQ5neFMxkyOjhioImeaRR0of3io0bgqdxg8
+++ oc whoami
++ test_name=admin
++ test_ip=127.0.0.1
++ oc login --username=system:admin
++ oc project logging
++ TEST_DIVIDER=------------------------------------------
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-qhrc7
++ write_and_verify_logs 1
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up '' ''
+++ date +%s
START wait_for_fluentd_to_catch_up at 2017-06-09 17:09:02.215348176+00:00
++ local starttime=1497028142
+++ date -u --rfc-3339=ns
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:09:02.215348176+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=e5902147-d03b-4277-89ce-5764675cdaeb
+++ uuidgen
++ local uuid_es_ops=5ddbc180-7abc-462a-8eb9-82b45f696660
++ local expected=1
++ local timeout=300
++ add_test_message e5902147-d03b-4277-89ce-5764675cdaeb
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/e5902147-d03b-4277-89ce-5764675cdaeb
added es message e5902147-d03b-4277-89ce-5764675cdaeb
added es-ops message 5ddbc180-7abc-462a-8eb9-82b45f696660
++ echo added es message e5902147-d03b-4277-89ce-5764675cdaeb
++ logger -i -p local6.info -t 5ddbc180-7abc-462a-8eb9-82b45f696660 5ddbc180-7abc-462a-8eb9-82b45f696660
++ echo added es-ops message 5ddbc180-7abc-462a-8eb9-82b45f696660
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=e5902147-d03b-4277-89ce-5764675cdaeb
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message e5902147-d03b-4277-89ce-5764675cdaeb
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:e5902147-d03b-4277-89ce-5764675cdaeb'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 295 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for e5902147-d03b-4277-89ce-5764675cdaeb
good - wait_for_fluentd_to_catch_up: found 1 record project logging for e5902147-d03b-4277-89ce-5764675cdaeb
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=5ddbc180-7abc-462a-8eb9-82b45f696660
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 5ddbc180-7abc-462a-8eb9-82b45f696660
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 5ddbc180-7abc-462a-8eb9-82b45f696660
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 5ddbc180-7abc-462a-8eb9-82b45f696660
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:5ddbc180-7abc-462a-8eb9-82b45f696660'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 298 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 5ddbc180-7abc-462a-8eb9-82b45f696660
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 5ddbc180-7abc-462a-8eb9-82b45f696660
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028154
+++ expr 1497028154 - 1497028142
+++ date -u --rfc-3339=ns
++ echo END wait_for_fluentd_to_catch_up took 12 seconds at 2017-06-09 17:09:14.410674673+00:00
END wait_for_fluentd_to_catch_up took 12 seconds at 2017-06-09 17:09:14.410674673+00:00
++ return 0
++ return 0
++ trap cleanup INT TERM EXIT
++ create_forwarding_fluentd
++ oc create configmap logging-forward-fluentd --from-file=fluent.conf=../templates/forward-fluent.conf
configmap "logging-forward-fluentd" created
++ oc get template/logging-fluentd-template -o yaml
++ sed -e 's/logging-infra-fluentd: "true"/logging-infra-forward-fluentd: "true"/' -e 's/name: logging-fluentd/name: logging-forward-fluentd/' -e 's/ fluentd/ forward-fluentd/' -e '/image:/ a \
          ports: \
            - containerPort: 24284'
++ oc new-app -f -
--> Deploying template "logging/logging-forward-fluentd-template" for "-" to project logging

     logging-forward-fluentd-template
     ---------
     Template for logging forward-fluentd deployment.

     * With parameters:
        * IMAGE_PREFIX=172.30.197.120:5000/logging/
        * IMAGE_VERSION=latest

--> Creating resources ...
    daemonset "logging-forward-fluentd" created
--> Success
    Run 'oc status' to view your app.
++ oc label node --all logging-infra-forward-fluentd=true
node "172.18.7.3" labeled
++ wait_for_pod_ACTION start forward-fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod forward-fluentd
+++ oc get pods -l component=forward-fluentd
+++ awk -v sel=forward-fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=forward-fluentd not running yet
pod for component=forward-fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod forward-fluentd
+++ oc get pods -l component=forward-fluentd
+++ awk -v sel=forward-fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-forward-fluentd-kzb7q
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-forward-fluentd-kzb7q ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
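The block above created the receiving side of the forward test. Condensed, create_forwarding_fluentd stands up a second fluentd daemonset that only listens for forwarded records on port 24284, by cloning the regular fluentd template under a new name and node label and feeding it the forward-fluent.conf configmap. A sketch reconstructed from the trace; the sed edits are copied from it.

    create_forwarding_fluentd() {
        # Config for the receiving side (a secure_forward <source> on 24284).
        oc create configmap logging-forward-fluentd \
            --from-file=fluent.conf=../templates/forward-fluent.conf

        # Clone the fluentd template: new name, new node-selector label, forward port exposed.
        oc get template/logging-fluentd-template -o yaml | \
            sed -e 's/logging-infra-fluentd: "true"/logging-infra-forward-fluentd: "true"/' \
                -e 's/name: logging-fluentd/name: logging-forward-fluentd/' \
                -e 's/ fluentd/ forward-fluentd/' \
                -e '/image:/ a \
          ports: \
            - containerPort: 24284' | oc new-app -f -

        # Schedule it on every node and wait for the pod to come up.
        oc label node --all logging-infra-forward-fluentd=true
        wait_for_pod_ACTION start forward-fluentd
    }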
++ update_current_fluentd
++ oc label node --all logging-infra-fluentd-
node "172.18.7.3" labeled
++ wait_for_pod_ACTION stop logging-fluentd-qhrc7
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-qhrc7
++ '[' -z logging-fluentd-qhrc7 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-qhrc7
pod logging-fluentd-qhrc7 still running
++ '[' -n 1 ']'
++ echo pod logging-fluentd-qhrc7 still running
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' stop = start ']'
++ '[' 110 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-qhrc7
++ '[' stop = start ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
++ oc get configmap/logging-fluentd -o yaml
++ sed '/## matches/ a\
      <match **>\
        @include configs.d/user/secure-forward.conf\
      </match>'
++ oc replace -f -
configmap "logging-fluentd" replaced
+++ oc get pods -l component=forward-fluentd -o name
++ POD=pods/logging-forward-fluentd-kzb7q
+++ oc get pods/logging-forward-fluentd-kzb7q '--template={{.status.podIP}}'
++ FLUENTD_FORWARD=172.17.0.10
++ oc patch configmap/logging-fluentd --type=json --patch '[{ "op": "replace", "path": "/data/secure-forward.conf", "value": "\
  @type secure_forward\n\
  self_hostname forwarding-${HOSTNAME}\n\
  shared_key aggregated_logging_ci_testing\n\
  secure no\n\
  buffer_queue_limit \"#{ENV['\''BUFFER_QUEUE_LIMIT'\'']}\"\n\
  buffer_chunk_limit \"#{ENV['\''BUFFER_SIZE_LIMIT'\'']}\"\n\
  <server>\n\
   host 172.17.0.10\n\
   port 24284\n\
  </server>"}]'
configmap "logging-fluentd" patched
++ oc label node --all logging-infra-fluentd=true
node "172.18.7.3" labeled
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-sb333
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-sb333 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
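The reconfiguration traced above, condensed: update_current_fluentd takes the regular fluentd down, rewires its configmap so every record is sent through secure-forward to the forward-fluentd pod's IP on port 24284, then brings fluentd back. A sketch from the trace; the oc patch body is summarized in a comment since it appears verbatim above.

    update_current_fluentd() {
        # Unlabel the node(s) so the current fluentd pod goes away.
        oc label node --all logging-infra-fluentd-
        wait_for_pod_ACTION stop "$fpod"

        # Wrap all records in a <match **> that includes secure-forward.conf.
        oc get configmap/logging-fluentd -o yaml | sed '/## matches/ a\
      <match **>\
        @include configs.d/user/secure-forward.conf\
      </match>' | oc replace -f -

        # Find the receiver pod's IP.
        local pod=$(oc get pods -l component=forward-fluentd -o name)
        local forward_ip=$(oc get "$pod" '--template={{.status.podIP}}')
        # oc patch configmap/logging-fluentd --type=json then rewrites
        # /data/secure-forward.conf to a secure_forward block whose <server> has
        # "host $forward_ip" and "port 24284" (full patch body shown verbatim above).

        # Relabel so a new fluentd pod starts with the forwarding config.
        oc label node --all logging-infra-fluentd=true
        wait_for_pod_ACTION start fluentd
    }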
++ write_and_verify_logs 1
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up '' ''
+++ date +%s
++ local starttime=1497028188
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:09:48.376269694+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:09:48.376269694+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=4e502554-6568-485d-8687-a698c5e73842
+++ uuidgen
++ local uuid_es_ops=692c3de6-33e5-40da-a6f4-37937b9c2baa
++ local expected=1
++ local timeout=300
++ add_test_message 4e502554-6568-485d-8687-a698c5e73842
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/4e502554-6568-485d-8687-a698c5e73842
added es message 4e502554-6568-485d-8687-a698c5e73842
++ echo added es message 4e502554-6568-485d-8687-a698c5e73842
++ logger -i -p local6.info -t 692c3de6-33e5-40da-a6f4-37937b9c2baa 692c3de6-33e5-40da-a6f4-37937b9c2baa
added es-ops message 692c3de6-33e5-40da-a6f4-37937b9c2baa
++ echo added es-ops message 692c3de6-33e5-40da-a6f4-37937b9c2baa
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=4e502554-6568-485d-8687-a698c5e73842
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
[ ... 44 further polling iterations identical to the one above omitted: each query still returns 0 records, the loop sleeps 1 second, and the retry counter ii counts down from 289 to 246 ... ]
++ '[' 245 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 4e502554-6568-485d-8687-a698c5e73842
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:4e502554-6568-485d-8687-a698c5e73842'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 245 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 4e502554-6568-485d-8687-a698c5e73842
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 4e502554-6568-485d-8687-a698c5e73842
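
The count above comes from a short chain of helpers that the xtrace exposes. A minimal sketch of that chain, reconstructed only from the trace (the real helpers live in the test suite's shell utilities and differ in detail, e.g. argument validation):

    # sketch of curl_es POD ENDPOINT [curl options...]: run curl inside the ES pod,
    # authenticating with the admin cert/key mounted from the elasticsearch secret
    curl_es() {
        local pod=$1
        local endpoint=$2
        shift 2
        oc exec "$pod" -- curl --silent --insecure "$@" \
            --key /etc/elasticsearch/secret/admin-key \
            --cert /etc/elasticsearch/secret/admin-cert \
            "https://localhost:9200${endpoint}"
    }

    # sketch of get_count_from_json: read a _count response on stdin, print its count field
    # (python 2 syntax, matching the interpreter seen in the trace)
    get_count_from_json() {
        python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
    }

    # sketch of query_es_from_es POD PROJECT OP FIELD VALUE: count records where FIELD matches VALUE
    query_es_from_es() {
        local pod=$1 project=$2 op=$3 field=$4 value=$5
        curl_es "$pod" "/${project}*/${op}?q=${field}:${value}" --connect-timeout 1
    }

The caller pipes the two together, roughly nrecs=$(query_es_from_es "$espod" project.logging _count message "$mymessage" | get_count_from_json).
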
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=692c3de6-33e5-40da-a6f4-37937b9c2baa
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 692c3de6-33e5-40da-a6f4-37937b9c2baa
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:692c3de6-33e5-40da-a6f4-37937b9c2baa' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:692c3de6-33e5-40da-a6f4-37937b9c2baa'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:692c3de6-33e5-40da-a6f4-37937b9c2baa'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 692c3de6-33e5-40da-a6f4-37937b9c2baa
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 692c3de6-33e5-40da-a6f4-37937b9c2baa
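
Both loops above (up to 300 one-second attempts against the project index, then against .operations) follow the same retry pattern. A sketch of it with simplified bodies; test_count_err, the error handler named at the call site, only runs if the timeout expires and is not shown here:

    # sketch of wait_until_cmd_or_err CMD ERRCMD ATTEMPTS: run CMD once per second
    # until it succeeds; on timeout run ERRCMD and fail
    wait_until_cmd_or_err() {
        local cmd=$1 errcmd=$2 attempts=$3
        local interval=1
        let ii=$attempts
        while [ $ii -gt 0 ] ; do
            if $cmd ; then
                break
            fi
            sleep $interval
            let ii=ii-1
        done
        if [ $ii -le 0 ] ; then
            $errcmd
            return 1
        fi
        return 0
    }

    # sketch of the per-attempt check: succeed once the record count for
    # $myfield:$mymessage in $myproject on $espod reaches $expected
    test_count_expected() {
        myfield=${myfield:-message}
        local nrecs=$(query_es_from_es "$espod" "$myproject" _count "$myfield" "$mymessage" \
                      | get_count_from_json)
        test "$nrecs" = "$expected"
    }
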
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028270
+++ expr 1497028270 - 1497028188
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 82 seconds at 2017-06-09 17:11:10.156707741+00:00
++ echo END wait_for_fluentd_to_catch_up took 82 seconds at 2017-06-09 17:11:10.156707741+00:00
++ return 0
++ return 0
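
The START/END lines bracket the whole check with wall-clock timestamps, and the elapsed time is plain epoch arithmetic. Roughly, omitting the marker and polling steps already sketched:

    wait_for_fluentd_to_catch_up() {
        local starttime=$(date +%s)
        echo START wait_for_fluentd_to_catch_up at $(date -u --rfc-3339=ns)
        # ... write one marker record per cluster, then poll both _count endpoints ...
        local endtime=$(date +%s)
        echo END wait_for_fluentd_to_catch_up took $(expr $endtime - $starttime) seconds at $(date -u --rfc-3339=ns)
        return 0
    }
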
++ cleanup
++ cleanup_forward
++ oc label node --all logging-infra-fluentd-
node "172.18.7.3" labeled
++ wait_for_pod_ACTION stop logging-fluentd-qhrc7
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-qhrc7
++ '[' -z logging-fluentd-qhrc7 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-qhrc7
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
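
The stop is detected by polling the API until the pod can no longer be described. A simplified sketch of that stop-side wait under a hypothetical name (the real wait_for_pod_ACTION handles both start and stop, and presumably redirects the describe output, since none appears above):

    # wait up to 120s, in 10s steps, for the pod object to disappear
    wait_for_pod_stop() {
        local curpod=$1
        local ii=120 incr=10
        while [ $ii -gt 0 ] ; do
            if ! oc describe pod/$curpod > /dev/null 2>&1 ; then
                break                   # pod is gone
            fi
            sleep $incr
            ii=$(expr $ii - $incr)
        done
        test $ii -gt 0
    }
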
++ oc delete daemonset/logging-forward-fluentd
daemonset "logging-forward-fluentd" deleted
+++ oc get configmap/logging-fluentd -o yaml
+++ grep '<match \*\*>'
++ '[' -n '      <match **>' ']'
++ oc get configmap/logging-fluentd -o yaml
++ sed -e '/<match \*\*>/ d' -e '/@include configs\.d\/user\/secure-forward\.conf/ d' -e '/<\/match>/ d'
++ oc replace -f -
configmap "logging-fluentd" replaced
++ oc patch configmap/logging-fluentd --type=json --patch '[{ "op": "replace", "path": "/data/secure-forward.conf", "value": "\
# @type secure_forward\n\
# self_hostname forwarding-${HOSTNAME}\n\
# shared_key aggregated_logging_ci_testing\n\
#  secure no\n\
#  <server>\n\
#   host ${FLUENTD_FORWARD}\n\
#   port 24284\n\
#  </server>"}]'
configmap "logging-fluentd" patched
++ oc label node --all logging-infra-fluentd=true
node "172.18.7.3" labeled
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
pod for component=fluentd not running yet
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-31277
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-31277 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
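
The start side of the wait leans on a small helper that greps the pod list for a Running pod of the requested component, as the repeated oc get pods / awk pairs show. A sketch of the helper and of the start-side loop, as the trace suggests:

    # sketch of get_running_pod COMPONENT: print the name of the first Running pod
    get_running_pod() {
        oc get pods -l component=$1 | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}'
    }

    # start-side idea of the wait: poll every 10s, up to 120s, until a pod is Running
    ii=120
    incr=10
    curpod=$(get_running_pod fluentd)
    while [ $ii -gt 0 -a -z "$curpod" ] ; do
        echo pod for component=fluentd not running yet
        sleep $incr
        ii=$(expr $ii - $incr)
        curpod=$(get_running_pod fluentd)
    done
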
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-31277
++ oc get events -o yaml
++ write_and_verify_logs 1
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up '' ''
+++ date +%s
++ local starttime=1497028292
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:11:32.386693717+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:11:32.386693717+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ uuidgen
++ local uuid_es_ops=57f538b0-5eb3-41fb-8545-f99be8df52a2
++ local expected=1
++ local timeout=300
++ add_test_message 5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/5b92d558-c240-4fa7-84e7-dab4afb419f2
added es message 5b92d558-c240-4fa7-84e7-dab4afb419f2
++ echo added es message 5b92d558-c240-4fa7-84e7-dab4afb419f2
++ logger -i -p local6.info -t 57f538b0-5eb3-41fb-8545-f99be8df52a2 57f538b0-5eb3-41fb-8545-f99be8df52a2
added es-ops message 57f538b0-5eb3-41fb-8545-f99be8df52a2
++ echo added es-ops message 57f538b0-5eb3-41fb-8545-f99be8df52a2
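
The two markers take different routes into Elasticsearch, which is exactly what the later found-1-record checks verify: the request against the kibana pod shows up in that pod's container log and is indexed under project.logging*, while logger writes a journal record that lands under .operations*. A sketch of the marker step (discarding the curl output is an assumption, since none appears above):

    # project-side marker: hit a bogus kibana URL so the uuid lands in the container log
    add_test_message() {
        local kib_pod=$(get_running_pod kibana)
        oc exec $kib_pod -c kibana -- curl --connect-timeout 1 -s "http://localhost:5601/$1" > /dev/null
        echo added es message $1
    }

    # operations-side marker: a syslog/journal record tagged and bodied with the uuid
    logger -i -p local6.info -t $uuid_es_ops $uuid_es_ops
    echo added es-ops message $uuid_es_ops
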
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=5b92d558-c240-4fa7-84e7-dab4afb419f2
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
[ ... 8 further polling iterations identical to the one above omitted: each query still returns 0 records, the loop sleeps 1 second, and the retry counter ii counts down from 299 to 292 ... ]
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
+++ shift
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 5b92d558-c240-4fa7-84e7-dab4afb419f2
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:5b92d558-c240-4fa7-84e7-dab4afb419f2'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 289 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 5b92d558-c240-4fa7-84e7-dab4afb419f2
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 5b92d558-c240-4fa7-84e7-dab4afb419f2
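The block above is one pass through the generic polling helper the logging tests use whenever they wait for a record to become searchable. A minimal sketch of that loop, reconstructed purely from the xtrace lines (the real wait_until_cmd_or_err in origin-aggregated-logging may differ in details); query_es_from_es and get_count_from_json are sketched after the .operations check below.

    # Retry $1 once a second until it succeeds or $3 seconds elapse, then run $2.
    wait_until_cmd_or_err() {
        local cmd=$1 errcmd=$2 ii=$3
        local interval=1
        while [ $ii -gt 0 ]; do
            if $cmd ; then
                return 0
            fi
            sleep $interval
            let ii=ii-$interval
        done
        $errcmd
        return 1
    }

    # The test command used above: count records matching the test UUID and
    # compare against the expected count. The globals espod, myproject,
    # mymessage, expected and (optionally) myfield are set by the caller,
    # exactly as seen in the trace.
    test_count_expected() {
        myfield=${myfield:-message}
        local nrecs=$(query_es_from_es $espod $myproject _count $myfield $mymessage \
                      | get_count_from_json)
        test "$nrecs" = "$expected"
    }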
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=57f538b0-5eb3-41fb-8545-f99be8df52a2
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 57f538b0-5eb3-41fb-8545-f99be8df52a2
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:57f538b0-5eb3-41fb-8545-f99be8df52a2' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:57f538b0-5eb3-41fb-8545-f99be8df52a2'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:57f538b0-5eb3-41fb-8545-f99be8df52a2'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 57f538b0-5eb3-41fb-8545-f99be8df52a2
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 57f538b0-5eb3-41fb-8545-f99be8df52a2
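The repeated `oc exec ... curl` lines throughout the wait are the expansion of a small stack of helpers. A condensed sketch, assuming the argument handling implied by the trace:

    # Run curl inside the Elasticsearch pod, authenticating with the admin
    # cert/key mounted from the secret, against the given API endpoint.
    curl_es() {
        local pod=$1
        local endpoint=$2
        shift; shift
        local args=("${@:-}")
        local secret_dir=/etc/elasticsearch/secret/
        oc exec $pod -- curl --silent --insecure "${args[@]}" \
            --key ${secret_dir}admin-key --cert ${secret_dir}admin-cert \
            "https://localhost:9200${endpoint}"
    }

    # Build the _count/_search URL for an index prefix, field and value.
    query_es_from_es() {
        curl_es $1 "/$2*/$3?q=$4:$5" --connect-timeout 1
    }

    # Pull the "count" field out of a _count response (python 2, as in this job).
    get_count_from_json() {
        python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
    }

Typical use, matching the lines above: query_es_from_es $espod project.logging _count message $uuid | get_count_from_json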
++ '[' -n '' ']'
++ '[' -n '' ']'
+++ date +%s
++ local endtime=1497028310
+++ expr 1497028310 - 1497028292
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:11:50.456643357+00:00
++ echo END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:11:50.456643357+00:00
++ return 0
++ return 0
++ cleanup
++ cleanup_forward
++ oc label node --all logging-infra-fluentd-
node "172.18.7.3" labeled
++ wait_for_pod_ACTION stop logging-fluentd-31277
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-31277
++ '[' -z logging-fluentd-31277 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-31277
pod logging-fluentd-31277 still running
++ '[' -n 1 ']'
++ echo pod logging-fluentd-31277 still running
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' stop = start ']'
++ '[' 110 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-31277
++ '[' stop = start ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
++ oc delete daemonset/logging-forward-fluentd
Error from server (NotFound): daemonsets.extensions "logging-forward-fluentd" not found
++ :
+++ oc get configmap/logging-fluentd -o yaml
+++ grep '<match \*\*>'
++ '[' -n '' ']'
++ oc patch configmap/logging-fluentd --type=json --patch '[{ "op": "replace", "path": "/data/secure-forward.conf", "value": "\
# @type secure_forward\n\
# self_hostname forwarding-${HOSTNAME}\n\
# shared_key aggregated_logging_ci_testing\n\
#  secure no\n\
#  <server>\n\
#   host ${FLUENTD_FORWARD}\n\
#   port 24284\n\
#  </server>"}]'
configmap "logging-fluentd" not patched
++ oc label node --all logging-infra-fluentd=true
node "172.18.7.3" labeled
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
pod for component=fluentd not running yet
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-k81b9
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-k81b9 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
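wait_for_pod_ACTION, seen here for both the stop of the old fluentd pod and the start of its replacement, is a 10-second poll with a roughly 120-second budget. A sketch reconstructed from the trace; the real helper may handle additional arguments:

    wait_for_pod_ACTION() {
        # $1 is "start" or "stop"; $2 is a component name (start) or pod name (stop)
        local action=$1 curpod=$2
        local ii=120 incr=10
        [ $action = start ] && curpod=$(get_running_pod $2)
        while [ $ii -gt 0 ]; do
            if [ $action = stop ]; then
                oc describe pod/$curpod > /dev/null 2>&1 || break
                echo pod $curpod still running
            else
                [ -n "$curpod" ] && break
                echo pod for component=$2 not running yet
            fi
            sleep $incr
            ii=$(expr $ii - $incr)
            [ $action = start ] && curpod=$(get_running_pod $2)
        done
        [ $ii -le 0 ] && return 1
        return 0
    }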
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-k81b9
++ oc get events -o yaml
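get_running_pod, used all through this log to resolve a component label to a pod name, is just an oc get piped through awk:

    # Print the name of the first Running pod carrying component=<name>.
    get_running_pod() {
        oc get pods -l component=$1 \
            | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}'
    }

    # e.g. fpod=$(get_running_pod fluentd); es_pod=$(get_running_pod es)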
running test test-json-parsing.sh
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ ARTIFACT_DIR=/tmp/origin-aggregated-logging/artifacts
++ '[' '!' -d /tmp/origin-aggregated-logging/artifacts ']'
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ uuidgen
++ uuid_es=86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
Adding test message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb to Kibana . . .
++ echo Adding test message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb to Kibana . . .
++ add_test_message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
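add_test_message plants the project-side test record by making a throwaway HTTP request to the Kibana pod with the UUID as the path, so the UUID shows up in Kibana's container log in the logging project, which Fluentd then indexes into project.logging. A sketch from the trace:

    add_test_message() {
        # $1 is the UUID to look for later in the message field
        local kib_pod=$(get_running_pod kibana)
        oc exec $kib_pod -c kibana -- \
            curl --connect-timeout 1 -s http://localhost:5601/$1
    }

The operations-side record is planted later in the log with `logger -i -p local6.info -t <uuid> <uuid>`, which goes through the journal into the .operations index.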
Waiting 600 seconds for 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb to show up in Elasticsearch . . .
++ rc=0
++ timeout=600
++ echo Waiting 600 seconds for 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb to show up in Elasticsearch . . .
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging.
++ mymessage=86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 600
++ let ii=600
++ local interval=1
++ '[' 600 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 599 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 598 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 597 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 596 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 595 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 594 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 593 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 592 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 591 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 590 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 589 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _count message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_count?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
good - ./logging.sh: found 1 record project logging for 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 589 -le 0 ']'
++ return 0
++ echo good - ./logging.sh: found 1 record project logging for 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
Testing whether the record is in the correct format . . .
++ echo Testing whether the record is in the correct format . . .
++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging. _search message 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
++ python test-json-parsing.py 86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb
++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging.*/_search?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb' --connect-timeout 1
++ local pod=logging-es-data-master-oawpjydu-1-f676p
++ local 'endpoint=/project.logging.*/_search?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
++ shift
++ shift
++ args=("${@:-}")
++ local args
++ local secret_dir=/etc/elasticsearch/secret/
++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging.*/_search?q=message:86c5a0ac-c6c1-4a63-a0e6-3ea730c40cdb'
Success: record contains all of the expected fields/values
Success: ./logging.sh passed
++ echo Success: ./logging.sh passed
++ exit 0
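test-json-parsing.sh boils down to: write a UUID through Kibana, wait for it with the same counting loop as above, then feed the full _search hit to test-json-parsing.py, which checks the expected fields and values. The shell side, condensed from the trace:

    es_pod=$(get_running_pod es)
    uuid_es=$(uuidgen)
    add_test_message $uuid_es

    # Wait up to 600s for the record to be countable (same helpers as above).
    espod=$es_pod; myproject=project.logging.; mymessage=$uuid_es; expected=1
    wait_until_cmd_or_err test_count_expected test_count_err 600

    # Then validate the indexed document's structure.
    query_es_from_es $es_pod project.logging. _search message $uuid_es \
        | python test-json-parsing.py $uuid_es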
running test test-mux.sh
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ '[' false == false -o false == false ']'
Skipping -- This test requires both USE_MUX_CLIENT and MUX_ALLOW_EXTERNAL to be true.
++ echo 'Skipping -- This test requires both USE_MUX_CLIENT and MUX_ALLOW_EXTERNAL to be true.'
++ exit 0
SKIPPING upgrade test for now
running test test-viaq-data-model.sh
++ set -o nounset
++ set -o pipefail
++ type get_running_pod
++ [[ 1 -ne 1 ]]
++ [[ true = \f\a\l\s\e ]]
++ CLUSTER=true
++ ops=-ops
++ INDEX_PREFIX=
++ PROJ_PREFIX=project.
++ ARTIFACT_DIR=/tmp/origin-aggregated-logging/artifacts
++ '[' '!' -d /tmp/origin-aggregated-logging/artifacts ']'
++ get_test_user_token
++ local current_project
+++ oc project -q
++ current_project=logging
++ oc login --username=admin --password=admin
+++ oc whoami -t
++ test_token=2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo
+++ oc whoami
++ test_name=admin
++ test_ip=127.0.0.1
++ oc login --username=system:admin
++ oc project logging
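get_test_user_token switches to the test user just long enough to mint an OAuth token for the later Kibana-proxied queries, then drops back to system:admin and the logging project. Sketch from the trace (admin/admin are the test credentials used in this job):

    get_test_user_token() {
        local current_project=$(oc project -q)
        oc login --username=admin --password=admin
        test_token=$(oc whoami -t)
        test_name=$(oc whoami)
        test_ip=127.0.0.1
        oc login --username=system:admin
        oc project $current_project
    }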
++ TEST_DIVIDER=------------------------------------------
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-k81b9
++ remove_test_volume
++ oc get template logging-fluentd-template -o json
++ python -c 'import json, sys; obj = json.loads(sys.stdin.read()); vm = obj["objects"][0]["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]; obj["objects"][0]["spec"]["template"]["spec"]["containers"][0]["volumeMounts"] = [xx for xx in vm if xx["name"] != "cdmtest"]; vs = obj["objects"][0]["spec"]["template"]["spec"]["volumes"]; obj["objects"][0]["spec"]["template"]["spec"]["volumes"] = [xx for xx in vs if xx["name"] != "cdmtest"]; print json.dumps(obj, indent=2)'
++ oc replace -f -
template "logging-fluentd-template" replaced
+++ mktemp
++ cfg=/tmp/tmp.dJEojPjGvg
++ cat
++ add_test_volume /tmp/tmp.dJEojPjGvg
++ oc get template logging-fluentd-template -o json
++ python -c 'import json, sys; obj = json.loads(sys.stdin.read()); obj["objects"][0]["spec"]["template"]["spec"]["containers"][0]["volumeMounts"].append({"name": "cdmtest", "mountPath": "/etc/fluent/configs.d/openshift/filter-pre-cdm-test.conf", "readOnly": True}); obj["objects"][0]["spec"]["template"]["spec"]["volumes"].append({"name": "cdmtest", "hostPath": {"path": "/tmp/tmp.dJEojPjGvg"}}); print json.dumps(obj, indent=2)'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ trap cleanup INT TERM EXIT
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-k81b9
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-k81b9
++ '[' -z logging-fluentd-k81b9 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-k81b9
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
pod for component=fluentd not running yet
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-5k2g9
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-5k2g9 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-5k2g9
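restart_fluentd bounces the daemonset so the edited template takes effect: delete it, wait for the old pod to stop, recreate from the template, and wait for the new pod. Reconstructed from the trace:

    restart_fluentd() {
        oc delete daemonset logging-fluentd
        wait_for_pod_ACTION stop $fpod
        oc process logging-fluentd-template | oc create -f -
        wait_for_pod_ACTION start fluentd
    }

    # The caller then refreshes its pod handle:
    fpod=$(get_running_pod fluentd)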
++ keep_fields=method,statusCode,type,@timestamp,req,res
++ write_and_verify_logs test1
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up get_logmessage get_logmessage2
+++ date +%s
++ local starttime=1497028372
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:12:52.198427611+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:12:52.198427611+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ uuidgen
++ local uuid_es_ops=d7c81b66-3aa6-4788-b2ee-eec332440620
++ local expected=1
++ local timeout=300
++ add_test_message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ echo added es message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
added es message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ logger -i -p local6.info -t d7c81b66-3aa6-4788-b2ee-eec332440620 d7c81b66-3aa6-4788-b2ee-eec332440620
added es-ops message d7c81b66-3aa6-4788-b2ee-eec332440620
++ echo added es-ops message d7c81b66-3aa6-4788-b2ee-eec332440620
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ get_count_from_json
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ get_count_from_json
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 292 -le 0 ']'
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=d7c81b66-3aa6-4788-b2ee-eec332440620
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER d7c81b66-3aa6-4788-b2ee-eec332440620
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER d7c81b66-3aa6-4788-b2ee-eec332440620
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER d7c81b66-3aa6-4788-b2ee-eec332440620
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER d7c81b66-3aa6-4788-b2ee-eec332440620
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:d7c81b66-3aa6-4788-b2ee-eec332440620'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 297 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for d7c81b66-3aa6-4788-b2ee-eec332440620
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for d7c81b66-3aa6-4788-b2ee-eec332440620
++ '[' -n get_logmessage ']'
++ get_logmessage 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ logmessage=3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ '[' -n get_logmessage2 ']'
++ get_logmessage2 d7c81b66-3aa6-4788-b2ee-eec332440620
++ logmessage2=d7c81b66-3aa6-4788-b2ee-eec332440620
+++ date +%s
++ local endtime=1497028390
+++ expr 1497028390 - 1497028372
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:13:10.069338627+00:00
++ echo END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:13:10.069338627+00:00
++ return 0
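wait_for_fluentd_to_catch_up, run twice so far in this log, ties the earlier helpers together: plant one UUID on the project side and one on the operations side, poll both indices, optionally hand the UUIDs back to caller-supplied functions, and report the elapsed time. A condensed sketch of the flow visible in the trace, with error handling trimmed:

    wait_for_fluentd_to_catch_up() {
        local starttime=$(date +%s)
        local es_pod=$(get_running_pod es)
        local es_ops_pod=$(get_running_pod es-ops)
        local uuid_es=$(uuidgen)
        local uuid_es_ops=$(uuidgen)

        add_test_message $uuid_es                                # -> project.logging
        logger -i -p local6.info -t $uuid_es_ops $uuid_es_ops    # -> .operations

        espod=$es_pod; myproject=project.logging; mymessage=$uuid_es; expected=1
        wait_until_cmd_or_err test_count_expected test_count_err 300

        espod=$es_ops_pod; myproject=.operations; mymessage=$uuid_es_ops
        myfield=systemd.u.SYSLOG_IDENTIFIER
        wait_until_cmd_or_err test_count_expected test_count_err 300

        # Optional callbacks, e.g. get_logmessage / get_logmessage2 in this test.
        [ -n "${1:-}" ] && $1 $uuid_es
        [ -n "${2:-}" ] && $2 $uuid_es_ops

        echo END wait_for_fluentd_to_catch_up took \
            $(expr $(date +%s) - $starttime) seconds
    }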
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ kpod=logging-kibana-1-nxzg9
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es project.logging _search message 3c3ea2cd-057e-492c-9553-fe69d8bdc0e1
++ python test-viaq-data-model.py test1
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es:9200/project.logging*/_search?q=message:3c3ea2cd-057e-492c-9553-fe69d8bdc0e1'
++ :
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es-ops .operations _search message d7c81b66-3aa6-4788-b2ee-eec332440620
++ python test-viaq-data-model.py test1
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es-ops:9200/.operations*/_search?q=message:d7c81b66-3aa6-4788-b2ee-eec332440620'
++ :
++ '[' 0 '!=' 0 ']'
++ return 0
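test-viaq-data-model.sh verifies the shape of each record by querying Elasticsearch from inside the Kibana container, using the Kibana client cert plus the test user's bearer token from get_test_user_token, and piping the hit into test-viaq-data-model.py. curl_es_from_kibana, as reconstructed from the trace:

    curl_es_from_kibana() {
        # $1 kibana pod, $2 es service (logging-es or logging-es-ops),
        # $3 index prefix, $4 _count or _search, $5 field, $6 value
        local kpod=$1 es_svc=$2 project=$3 action=$4 field=$5 value=$6
        oc exec $kpod -c kibana -- curl --connect-timeout 1 -s -k \
            --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key \
            -H "X-Proxy-Remote-User: $test_name" \
            -H "Authorization: Bearer $test_token" \
            -H "X-Forwarded-For: $test_ip" \
            "https://${es_svc}:9200/${project}*/${action}?q=${field}:${value}"
    }

    # e.g. curl_es_from_kibana $kpod logging-es project.logging _search message $logmessage \
    #          | python test-viaq-data-model.py test1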
++ add_cdm_env_var_val CDM_USE_UNDEFINED '"true"'
+++ mktemp
++ junk=/tmp/tmp.BFpRdr8TJX
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.BFpRdr8TJX'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.BFpRdr8TJX
++ add_cdm_env_var_val CDM_EXTRA_KEEP_FIELDS method,statusCode,type,@timestamp,req,res
+++ mktemp
++ junk=/tmp/tmp.caw8u0Q9p9
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.caw8u0Q9p9'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.caw8u0Q9p9
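add_cdm_env_var_val injects an environment variable into the fluentd template by writing the name/value pair to a temp file and splicing it in after the "env:" line with sed's r command. A sketch; the exact indentation of the injected YAML has to line up with the template's existing env entries and is not visible in the trace:

    add_cdm_env_var_val() {
        # $1 = variable name, $2 = value (already quoted if it must be a string)
        local junk=$(mktemp)
        printf '        - name: %s\n          value: %s\n' "$1" "$2" > $junk
        oc get template logging-fluentd-template -o yaml \
            | sed "/env:/r $junk" \
            | oc replace -f -
        rm -f $junk
    }

    # e.g. add_cdm_env_var_val CDM_USE_UNDEFINED '"true"'
    #      add_cdm_env_var_val CDM_EXTRA_KEEP_FIELDS method,statusCode,type,@timestamp,req,res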
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-5k2g9
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-5k2g9
++ '[' -z logging-fluentd-5k2g9 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-5k2g9
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
pod for component=fluentd not running yet
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-t24mn
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-t24mn ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-t24mn
++ write_and_verify_logs test2
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up get_logmessage get_logmessage2
+++ date +%s
++ local starttime=1497028412
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:13:32.077142397+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:13:32.077142397+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ uuidgen
++ local uuid_es_ops=77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ local expected=1
++ local timeout=300
++ add_test_message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
added es message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ echo added es message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ logger -i -p local6.info -t 77fd1555-cc16-4efe-8fb3-79df027c1ceb 77fd1555-cc16-4efe-8fb3-79df027c1ceb
added es-ops message 77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ echo added es-ops message 77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 288 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 288 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
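
[editor's note] For readability, here is a minimal sketch of the retry helper the xtrace keeps expanding above: wait_until_cmd_or_err calling test_count_expected once a second with a budget of 300 tries. This is reconstructed from the trace, not the project's actual source; the test_count_err callback is only invoked on timeout.

wait_until_cmd_or_err() {
    local cmd=$1 errcmd=$2 ii=${3:-300}
    local interval=1
    while [ "$ii" -gt 0 ]; do
        if "$cmd"; then
            break                 # e.g. the expected record count was reached
        fi
        sleep "$interval"
        ii=$((ii - 1))            # the trace shows this as: let ii=ii-1
    done
    if [ "$ii" -le 0 ]; then
        "$errcmd"                 # dump diagnostics once the budget is exhausted
        return 1
    fi
    return 0
}
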
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 77fd1555-cc16-4efe-8fb3-79df027c1ceb
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:77fd1555-cc16-4efe-8fb3-79df027c1ceb' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:77fd1555-cc16-4efe-8fb3-79df027c1ceb'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:77fd1555-cc16-4efe-8fb3-79df027c1ceb'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 77fd1555-cc16-4efe-8fb3-79df027c1ceb
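
[editor's note] Every one of those polling iterations bottoms out in the same curl_es call. A sketch of that wrapper as it appears when expanded in the trace (treat it as a reconstruction rather than the exact source): it runs curl inside the Elasticsearch pod and authenticates with the admin client certificate mounted from the secret.

curl_es() {
    local pod=$1
    local endpoint=$2
    shift; shift
    local args=("${@:-}")
    local secret_dir=/etc/elasticsearch/secret/
    oc exec "$pod" -- curl --silent --insecure "${args[@]}" \
        --key "${secret_dir}admin-key" --cert "${secret_dir}admin-cert" \
        "https://localhost:9200${endpoint}"
}

# Example, matching the queries above (uuid is whichever marker is being checked):
# curl_es "$espod" '/project.logging*/_count?q=message:<uuid>' --connect-timeout 1
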
++ '[' -n get_logmessage ']'
++ get_logmessage 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ logmessage=7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ '[' -n get_logmessage2 ']'
++ get_logmessage2 77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ logmessage2=77fd1555-cc16-4efe-8fb3-79df027c1ceb
+++ date +%s
++ local endtime=1497028431
+++ expr 1497028431 - 1497028412
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 19 seconds at 2017-06-09 17:13:51.464556275+00:00
++ echo END wait_for_fluentd_to_catch_up took 19 seconds at 2017-06-09 17:13:51.464556275+00:00
++ return 0
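
[editor's note] The count itself is extracted from the _count response by get_count_from_json, the Python one-liner visible throughout the loop. A sketch; the one-liner is the Python 2 form run by this CI image, and the commented variant is merely an assumed Python 3 equivalent for reference.

get_count_from_json() {
    python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
}

# Assumed Python 3 equivalent (this environment runs Python 2.7):
# python3 -c 'import json, sys; print(json.loads(sys.stdin.read()).get("count", 0))'

# Usage, as in test_count_expected above:
# nrecs=$(curl_es "$espod" "/${myproject}*/_count?q=${myfield}:${mymessage}" --connect-timeout 1 \
#         | get_count_from_json)
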
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ kpod=logging-kibana-1-nxzg9
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es project.logging _search message 7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a
++ python test-viaq-data-model.py test2
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es:9200/project.logging*/_search?q=message:7f6d08f0-ebd9-4a4c-99bb-5ea75d27714a'
++ :
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es-ops .operations _search message 77fd1555-cc16-4efe-8fb3-79df027c1ceb
++ python test-viaq-data-model.py test2
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es-ops:9200/.operations*/_search?q=message:77fd1555-cc16-4efe-8fb3-79df027c1ceb'
++ :
++ '[' 0 '!=' 0 ']'
++ return 0
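
[editor's note] The cross-check just performed uses a different wrapper, curl_es_from_kibana, which issues the same _search from inside the Kibana container against the logging-es service, presenting the Kibana client certificate plus proxy headers and a bearer token. A reconstruction; the token is a placeholder here, and in the log it is presumably obtained earlier in the run.

curl_es_from_kibana() {
    local pod=$1 eshost=$2 project=$3 endpoint=$4 field=$5 value=$6
    oc exec "$pod" -c kibana -- curl --connect-timeout 1 -s -k \
        --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key \
        -H 'X-Proxy-Remote-User: admin' \
        -H 'Authorization: Bearer <token>' \
        -H 'X-Forwarded-For: 127.0.0.1' \
        "https://${eshost}:9200/${project}*/${endpoint}?q=${field}:${value}"
}

# curl_es_from_kibana "$kpod" logging-es project.logging _search message "$logmessage"
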
++ del_cdm_env_var CDM_EXTRA_KEEP_FIELDS
++ oc get template logging-fluentd-template -o yaml
++ sed '/- name: CDM_EXTRA_KEEP_FIELDS$/,/value:/d'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ add_cdm_env_var_val CDM_EXTRA_KEEP_FIELDS undefined4,undefined5,method,statusCode,type,@timestamp,req,res
+++ mktemp
++ junk=/tmp/tmp.0E8UbktcqM
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.0E8UbktcqM'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.0E8UbktcqM
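
[editor's note] Its counterpart, add_cdm_env_var_val, goes the other way: it writes a small YAML fragment for the new env entry into a temp file (the trace only shows a bare "cat", so the fragment's exact indentation and quoting below are assumptions), splices it in after the "env:" line with sed's r command, and replaces the template.

add_cdm_env_var_val() {
    local var=$1 val=$2
    local junk
    junk=$(mktemp)
    # Assumed shape of the spliced fragment; the real heredoc is not echoed by xtrace.
    cat > "$junk" <<EOF
        - name: ${var}
          value: "${val}"
EOF
    oc get template logging-fluentd-template -o yaml \
        | sed "/env:/r ${junk}" \
        | oc replace -f -
    rm -f "$junk"
}

# add_cdm_env_var_val CDM_EXTRA_KEEP_FIELDS undefined4,undefined5,method,statusCode,type,@timestamp,req,res
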
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-t24mn
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-t24mn
++ '[' -z logging-fluentd-t24mn -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-t24mn
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
pod for component=fluentd not running yet
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-vl809
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-vl809 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
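
[editor's note] Sketches of the pod helpers seen above. get_running_pod filters "oc get pods" by the component label and keeps only rows in the Running state; the wait_for_pod_ACTION start path polls it every 10 seconds for up to 120 seconds. The stop path, waiting for "oc describe pod/..." to stop succeeding, is implied by the trace and simplified here.

get_running_pod() {
    oc get pods -l component="$1" \
        | awk -v sel="$1" '$1 ~ sel && $3 == "Running" {print $1}'
}

wait_for_pod_ACTION() {
    local action=$1 pod=$2 ii=120 incr=10
    if [ "$action" = start ]; then
        local curpod
        curpod=$(get_running_pod "$pod")
        while [ "$ii" -gt 0 ] && [ -z "$curpod" ]; do
            echo "pod for component=${pod} not running yet"
            sleep "$incr"
            ii=$((ii - incr))
            curpod=$(get_running_pod "$pod")
        done
    else
        # stop: wait until the named pod no longer exists
        while [ "$ii" -gt 0 ] && oc describe "pod/$pod" > /dev/null 2>&1; do
            sleep "$incr"
            ii=$((ii - incr))
        done
    fi
    [ "$ii" -gt 0 ]
}
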
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-vl809
++ write_and_verify_logs test3
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up get_logmessage get_logmessage2
+++ date +%s
++ local starttime=1497028452
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:14:12.036138198+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:14:12.036138198+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ uuidgen
++ local uuid_es_ops=30575da0-db6a-42d9-bda5-9f734215882d
++ local expected=1
++ local timeout=300
++ add_test_message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
added es message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ echo added es message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ logger -i -p local6.info -t 30575da0-db6a-42d9-bda5-9f734215882d 30575da0-db6a-42d9-bda5-9f734215882d
added es-ops message 30575da0-db6a-42d9-bda5-9f734215882d
++ echo added es-ops message 30575da0-db6a-42d9-bda5-9f734215882d
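
[editor's note] A sketch of how the two marker messages get written. The app-log marker is an HTTP request for the uuid made inside the Kibana pod, presumably so the request shows up in Kibana's container log and is carried by fluentd into project.logging; the operations marker is a journal entry written with logger, which lands in .operations with the uuid as SYSLOG_IDENTIFIER.

add_test_message() {
    local uuid=$1
    local kib_pod
    kib_pod=$(get_running_pod kibana)
    oc exec "$kib_pod" -c kibana -- curl --connect-timeout 1 -s \
        "http://localhost:5601/${uuid}"
}

# Operations-side marker, exactly as in the trace:
# logger -i -p local6.info -t "$uuid_es_ops" "$uuid_es_ops"
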
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 288 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
good - wait_for_fluentd_to_catch_up: found 1 record project logging for bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 288 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=30575da0-db6a-42d9-bda5-9f734215882d
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 30575da0-db6a-42d9-bda5-9f734215882d
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:30575da0-db6a-42d9-bda5-9f734215882d' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:30575da0-db6a-42d9-bda5-9f734215882d'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:30575da0-db6a-42d9-bda5-9f734215882d'
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 30575da0-db6a-42d9-bda5-9f734215882d
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 30575da0-db6a-42d9-bda5-9f734215882d
++ '[' -n get_logmessage ']'
++ get_logmessage bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ logmessage=bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ '[' -n get_logmessage2 ']'
++ get_logmessage2 30575da0-db6a-42d9-bda5-9f734215882d
++ logmessage2=30575da0-db6a-42d9-bda5-9f734215882d
+++ date +%s
++ local endtime=1497028471
+++ expr 1497028471 - 1497028452
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 19 seconds at 2017-06-09 17:14:31.268773079+00:00
++ echo END wait_for_fluentd_to_catch_up took 19 seconds at 2017-06-09 17:14:31.268773079+00:00
++ return 0
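
[editor's note] Taken together, the wait_for_fluentd_to_catch_up run that just finished (19 seconds for test3) follows the skeleton below. It is a reconstruction that leans on the sketches above; the get_logmessage/get_logmessage2 callbacks and the test_count_err diagnostics are elided, and in the real script myfield defaults to "message" when not set.

wait_for_fluentd_to_catch_up() {
    local starttime=$(date +%s)
    local es_pod=$(get_running_pod es)
    local es_ops_pod=$(get_running_pod es-ops)
    local uuid_es=$(uuidgen)
    local uuid_es_ops=$(uuidgen)

    add_test_message "$uuid_es"                                  # marker for project.logging
    logger -i -p local6.info -t "$uuid_es_ops" "$uuid_es_ops"    # marker for .operations

    # Poll until each marker is indexed exactly once (300 x 1s budget per index).
    espod=$es_pod; myproject=project.logging; mymessage=$uuid_es; expected=1; myfield=message
    wait_until_cmd_or_err test_count_expected test_count_err 300

    espod=$es_ops_pod; myproject=.operations; mymessage=$uuid_es_ops; myfield=systemd.u.SYSLOG_IDENTIFIER
    wait_until_cmd_or_err test_count_expected test_count_err 300

    echo "END wait_for_fluentd_to_catch_up took $(( $(date +%s) - starttime )) seconds"
}
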
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ kpod=logging-kibana-1-nxzg9
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es project.logging _search message bc9bb09f-4099-46bd-bdf6-1b10c5e869c0
++ python test-viaq-data-model.py test3
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es:9200/project.logging*/_search?q=message:bc9bb09f-4099-46bd-bdf6-1b10c5e869c0'
++ :
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es-ops .operations _search message 30575da0-db6a-42d9-bda5-9f734215882d
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es-ops:9200/.operations*/_search?q=message:30575da0-db6a-42d9-bda5-9f734215882d'
++ python test-viaq-data-model.py test3
++ :
++ '[' 0 '!=' 0 ']'
++ return 0
++ add_cdm_env_var_val CDM_UNDEFINED_NAME myname
+++ mktemp
++ junk=/tmp/tmp.HbquYK7BqY
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.HbquYK7BqY'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.HbquYK7BqY
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-vl809
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-vl809
++ '[' -z logging-fluentd-vl809 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-vl809
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
pod for component=fluentd not running yet
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-9fqx3
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-9fqx3 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-9fqx3
++ write_and_verify_logs test4
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up get_logmessage get_logmessage2
+++ date +%s
++ local starttime=1497028488
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:14:48.366738855+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:14:48.366738855+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ uuidgen
++ local uuid_es_ops=80709a4c-cca4-4d86-8034-96615e07a50b
++ local expected=1
++ local timeout=300
++ add_test_message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/9da5cfd5-c8c9-47cf-b514-7d674b14bca5
added es message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ echo added es message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ logger -i -p local6.info -t 80709a4c-cca4-4d86-8034-96615e07a50b 80709a4c-cca4-4d86-8034-96615e07a50b
added es-ops message 80709a4c-cca4-4d86-8034-96615e07a50b
++ echo added es-ops message 80709a4c-cca4-4d86-8034-96615e07a50b
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 289 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=80709a4c-cca4-4d86-8034-96615e07a50b
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 80709a4c-cca4-4d86-8034-96615e07a50b
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 80709a4c-cca4-4d86-8034-96615e07a50b
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b' --connect-timeout 1
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 80709a4c-cca4-4d86-8034-96615e07a50b
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER 80709a4c-cca4-4d86-8034-96615e07a50b
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:80709a4c-cca4-4d86-8034-96615e07a50b'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 297 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 80709a4c-cca4-4d86-8034-96615e07a50b
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 80709a4c-cca4-4d86-8034-96615e07a50b
++ '[' -n get_logmessage ']'
++ get_logmessage 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ logmessage=9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ '[' -n get_logmessage2 ']'
++ get_logmessage2 80709a4c-cca4-4d86-8034-96615e07a50b
++ logmessage2=80709a4c-cca4-4d86-8034-96615e07a50b
+++ date +%s
++ local endtime=1497028510
+++ expr 1497028510 - 1497028488
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 22 seconds at 2017-06-09 17:15:10.504487260+00:00
++ echo END wait_for_fluentd_to_catch_up took 22 seconds at 2017-06-09 17:15:10.504487260+00:00
++ return 0
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ kpod=logging-kibana-1-nxzg9
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es project.logging _search message 9da5cfd5-c8c9-47cf-b514-7d674b14bca5
++ python test-viaq-data-model.py test4
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es:9200/project.logging*/_search?q=message:9da5cfd5-c8c9-47cf-b514-7d674b14bca5'
++ :
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es-ops .operations _search message 80709a4c-cca4-4d86-8034-96615e07a50b
++ python test-viaq-data-model.py test4
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es-ops:9200/.operations*/_search?q=message:80709a4c-cca4-4d86-8034-96615e07a50b'
++ :
++ '[' 0 '!=' 0 ']'
++ return 0
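
Note: the two checks just above verify that the same records are reachable through the Kibana pod — curl_es_from_kibana runs curl inside the kibana container with the Kibana client cert/key, the admin proxy headers, and a bearer token, and test-viaq-data-model.py then inspects the returned hits. A sketch of that helper, reconstructed from the traced command (how the bearer token is obtained is not shown in this trace, so ${token} is a placeholder):

curl_es_from_kibana() {
    local kpod=$1 es_svc=$2 project=$3 action=$4 field=$5 value=$6
    # ${token}: the admin user's bearer token; its origin is not visible in this log
    oc exec "${kpod}" -c kibana -- curl --connect-timeout 1 -s -k \
        --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key \
        -H "X-Proxy-Remote-User: admin" \
        -H "Authorization: Bearer ${token}" \
        -H "X-Forwarded-For: 127.0.0.1" \
        "https://${es_svc}:9200/${project}*/${action}?q=${field}:${value}"
}
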
++ del_cdm_env_var CDM_EXTRA_KEEP_FIELDS
++ oc get template logging-fluentd-template -o yaml
++ oc replace -f -
++ sed '/- name: CDM_EXTRA_KEEP_FIELDS$/,/value:/d'
template "logging-fluentd-template" replaced
++ add_cdm_env_var_val CDM_EXTRA_KEEP_FIELDS undefined4,undefined5,empty1,undefined3,method,statusCode,type,@timestamp,req,res
+++ mktemp
++ junk=/tmp/tmp.PCT7E5Mxaf
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.PCT7E5Mxaf'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.PCT7E5Mxaf
++ add_cdm_env_var_val CDM_KEEP_EMPTY_FIELDS undefined4,undefined5,empty1,undefined3
+++ mktemp
++ junk=/tmp/tmp.pfglr1MDev
++ cat
++ oc get template logging-fluentd-template -o yaml
++ sed '/env:/r /tmp/tmp.pfglr1MDev'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.pfglr1MDev
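
Note: both template edits above follow the same pattern — write the new CDM_* environment entry to a temp file, splice it into the template's env: block with sed's r command, and push the result back with oc replace. A sketch of add_cdm_env_var_val based on the trace (the exact YAML layout of the spliced fragment is assumed, since cat's input is not echoed):

add_cdm_env_var_val() {
    local name=$1 val=$2
    local junk
    junk=$(mktemp)
    # YAML fragment appended after the template's "env:" line (indentation assumed)
    cat > "${junk}" <<EOF
        - name: ${name}
          value: "${val}"
EOF
    oc get template logging-fluentd-template -o yaml | \
        sed "/env:/r ${junk}" | \
        oc replace -f -
    rm -f "${junk}"
}

del_cdm_env_var, used just before, is the inverse: it filters the name/value pair out of the template with sed '/- name: CDM_EXTRA_KEEP_FIELDS$/,/value:/d' before the new value is added.
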
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-9fqx3
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-9fqx3
++ '[' -z logging-fluentd-9fqx3 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-9fqx3
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
pod for component=fluentd not running yet
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ curpod=logging-fluentd-hlw91
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-hlw91 ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
++ fpod=logging-fluentd-hlw91
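
Note: restart_fluentd, as traced above, recreates the fluentd daemonset and then polls with wait_for_pod_ACTION until the old pod is gone and a new one reports Running. A condensed sketch of that sequence, reconstructed from the trace (whether the old pod name is carried in $fpod is inferred from context); get_running_pod is shown exactly as the traced oc get pods | awk filter:

get_running_pod() {
    # print the name of the Running pod carrying the given component label
    oc get pods -l component=$1 | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}'
}

restart_fluentd() {
    # tear down the current daemonset and wait for its pod to terminate
    oc delete daemonset logging-fluentd
    wait_for_pod_ACTION stop "${fpod}"
    # recreate it from the (possibly edited) template and wait for a new Running pod
    oc process logging-fluentd-template | oc create -f -
    wait_for_pod_ACTION start fluentd
}
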
++ write_and_verify_logs test5 allow_empty
++ expected=1
++ rc=0
++ wait_for_fluentd_to_catch_up get_logmessage get_logmessage2
+++ date +%s
++ local starttime=1497028528
+++ date -u --rfc-3339=ns
START wait_for_fluentd_to_catch_up at 2017-06-09 17:15:28.617351497+00:00
++ echo START wait_for_fluentd_to_catch_up at 2017-06-09 17:15:28.617351497+00:00
+++ get_running_pod es
+++ oc get pods -l component=es
+++ awk -v sel=es '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_pod=logging-es-data-master-oawpjydu-1-f676p
+++ get_running_pod es-ops
+++ oc get pods -l component=es-ops
+++ awk -v sel=es-ops '$1 ~ sel && $3 == "Running" {print $1}'
++ local es_ops_pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ '[' -z logging-es-ops-data-master-n5j3wlaj-1-h1fvk ']'
+++ uuidgen
++ local uuid_es=f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ uuidgen
++ local uuid_es_ops=ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ local expected=1
++ local timeout=300
++ add_test_message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ local kib_pod=logging-kibana-1-nxzg9
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s http://localhost:5601/f629100c-8636-4d6b-91fa-ba31e64c5afe
added es message f629100c-8636-4d6b-91fa-ba31e64c5afe
++ echo added es message f629100c-8636-4d6b-91fa-ba31e64c5afe
++ logger -i -p local6.info -t ba7aeca8-b88a-4359-80d9-5b3347e0c1fd ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
added es-ops message ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ echo added es-ops message ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ local rc=0
++ espod=logging-es-data-master-oawpjydu-1-f676p
++ myproject=project.logging
++ mymessage=f629100c-8636-4d6b-91fa-ba31e64c5afe
++ expected=1
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 299 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 298 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 297 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 296 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 295 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 294 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 293 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 292 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 291 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 290 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=0
++ test 0 = 1
++ sleep 1
++ let ii=ii-1
++ '[' 289 -gt 0 ']'
++ test_count_expected
++ myfield=message
+++ query_es_from_es logging-es-data-master-oawpjydu-1-f676p project.logging _count message f629100c-8636-4d6b-91fa-ba31e64c5afe
+++ get_count_from_json
+++ curl_es logging-es-data-master-oawpjydu-1-f676p '/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe' --connect-timeout 1
+++ local pod=logging-es-data-master-oawpjydu-1-f676p
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-data-master-oawpjydu-1-f676p -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/project.logging*/_count?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 289 -le 0 ']'
++ return 0
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project logging for f629100c-8636-4d6b-91fa-ba31e64c5afe
good - wait_for_fluentd_to_catch_up: found 1 record project logging for f629100c-8636-4d6b-91fa-ba31e64c5afe
++ espod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
++ myproject=.operations
++ mymessage=ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ expected=1
++ myfield=systemd.u.SYSLOG_IDENTIFIER
++ wait_until_cmd_or_err test_count_expected test_count_err 300
++ let ii=300
++ local interval=1
++ '[' 300 -gt 0 ']'
++ test_count_expected
++ myfield=systemd.u.SYSLOG_IDENTIFIER
+++ query_es_from_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk .operations _count systemd.u.SYSLOG_IDENTIFIER ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
+++ get_count_from_json
+++ curl_es logging-es-ops-data-master-n5j3wlaj-1-h1fvk '/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:ba7aeca8-b88a-4359-80d9-5b3347e0c1fd' --connect-timeout 1
+++ local pod=logging-es-ops-data-master-n5j3wlaj-1-h1fvk
+++ python -c 'import json, sys; print json.loads(sys.stdin.read()).get("count", 0)'
+++ local 'endpoint=/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:ba7aeca8-b88a-4359-80d9-5b3347e0c1fd'
+++ shift
+++ shift
+++ args=("${@:-}")
+++ local args
+++ local secret_dir=/etc/elasticsearch/secret/
+++ oc exec logging-es-ops-data-master-n5j3wlaj-1-h1fvk -- curl --silent --insecure --connect-timeout 1 --key /etc/elasticsearch/secret/admin-key --cert /etc/elasticsearch/secret/admin-cert 'https://localhost:9200/.operations*/_count?q=systemd.u.SYSLOG_IDENTIFIER:ba7aeca8-b88a-4359-80d9-5b3347e0c1fd'
++ local nrecs=1
++ test 1 = 1
++ break
++ '[' 300 -le 0 ']'
++ return 0
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ echo good - wait_for_fluentd_to_catch_up: found 1 record project .operations for ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ '[' -n get_logmessage ']'
++ get_logmessage f629100c-8636-4d6b-91fa-ba31e64c5afe
++ logmessage=f629100c-8636-4d6b-91fa-ba31e64c5afe
++ '[' -n get_logmessage2 ']'
++ get_logmessage2 ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ logmessage2=ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
+++ date +%s
++ local endtime=1497028546
+++ expr 1497028546 - 1497028528
+++ date -u --rfc-3339=ns
END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:15:46.726240177+00:00
++ echo END wait_for_fluentd_to_catch_up took 18 seconds at 2017-06-09 17:15:46.726240177+00:00
++ return 0
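
Note: every test_count_expected iteration above goes through query_es_from_es and curl_es, which simply oc exec a curl inside the Elasticsearch pod against https://localhost:9200, authenticating with the admin cert and key mounted from /etc/elasticsearch/secret/. A sketch of curl_es as it appears in the trace (argument handling slightly simplified):

curl_es() {
    local pod=$1
    local endpoint=$2
    shift; shift
    # any extra arguments (e.g. --connect-timeout 1) are passed straight to curl
    local args=( "$@" )
    local secret_dir=/etc/elasticsearch/secret/
    oc exec "${pod}" -- curl --silent --insecure "${args[@]}" \
        --key ${secret_dir}admin-key --cert ${secret_dir}admin-cert \
        "https://localhost:9200${endpoint}"
}

get_count_from_json then pipes the JSON response through the python one-liner shown in the trace to pull out the "count" field.
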
+++ get_running_pod kibana
+++ oc get pods -l component=kibana
+++ awk -v sel=kibana '$1 ~ sel && $3 == "Running" {print $1}'
++ kpod=logging-kibana-1-nxzg9
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es project.logging _search message f629100c-8636-4d6b-91fa-ba31e64c5afe
++ python test-viaq-data-model.py test5 allow_empty
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es:9200/project.logging*/_search?q=message:f629100c-8636-4d6b-91fa-ba31e64c5afe'
++ :
++ '[' 0 = 0 ']'
++ curl_es_from_kibana logging-kibana-1-nxzg9 logging-es-ops .operations _search message ba7aeca8-b88a-4359-80d9-5b3347e0c1fd
++ python test-viaq-data-model.py test5 allow_empty
++ oc exec logging-kibana-1-nxzg9 -c kibana -- curl --connect-timeout 1 -s -k --cert /etc/kibana/keys/cert --key /etc/kibana/keys/key -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 2jcwR9RrLfc74_vuEgMdd8YOf8LPg2UOtrIilUyUFmo' -H 'X-Forwarded-For: 127.0.0.1' 'https://logging-es-ops:9200/.operations*/_search?q=message:ba7aeca8-b88a-4359-80d9-5b3347e0c1fd'
++ :
++ '[' 0 '!=' 0 ']'
++ return 0
++ cleanup
++ remove_test_volume
++ oc get template logging-fluentd-template -o json
++ python -c 'import json, sys; obj = json.loads(sys.stdin.read()); vm = obj["objects"][0]["spec"]["template"]["spec"]["containers"][0]["volumeMounts"]; obj["objects"][0]["spec"]["template"]["spec"]["containers"][0]["volumeMounts"] = [xx for xx in vm if xx["name"] != "cdmtest"]; vs = obj["objects"][0]["spec"]["template"]["spec"]["volumes"]; obj["objects"][0]["spec"]["template"]["spec"]["volumes"] = [xx for xx in vs if xx["name"] != "cdmtest"]; print json.dumps(obj, indent=2)'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ remove_cdm_env
++ oc get template logging-fluentd-template -o yaml
++ sed '/- name: CDM_/,/value:/d'
++ oc replace -f -
template "logging-fluentd-template" replaced
++ rm -f /tmp/tmp.dJEojPjGvg
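
Note: the cleanup above strips everything the test added — remove_test_volume filters the cdmtest volume and volumeMount out of the template JSON with the python snippet shown, and remove_cdm_env drops every CDM_* name/value pair from the env: block before the daemonset is restarted one last time. The env removal, as traced:

remove_cdm_env() {
    # drop every "- name: CDM_*" entry together with its value: line, then push the template back
    oc get template logging-fluentd-template -o yaml | \
        sed '/- name: CDM_/,/value:/d' | \
        oc replace -f -
}
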
++ restart_fluentd
++ oc delete daemonset logging-fluentd
daemonset "logging-fluentd" deleted
++ wait_for_pod_ACTION stop logging-fluentd-hlw91
++ local ii=120
++ local incr=10
++ '[' stop = start ']'
++ curpod=logging-fluentd-hlw91
++ '[' -z logging-fluentd-hlw91 -a -n '' ']'
++ '[' 120 -gt 0 ']'
++ '[' stop = stop ']'
++ oc describe pod/logging-fluentd-hlw91
++ '[' stop = start ']'
++ break
++ '[' 120 -le 0 ']'
++ return 0
++ oc process logging-fluentd-template
++ oc create -f -
daemonset "logging-fluentd" created
++ wait_for_pod_ACTION start fluentd
++ local ii=120
++ local incr=10
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
pod for component=fluentd not running yet
++ curpod=
++ '[' 120 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z '' ']'
++ '[' -n '' ']'
++ '[' -n 1 ']'
++ echo pod for component=fluentd not running yet
++ sleep 10
+++ expr 120 - 10
++ ii=110
++ '[' start = start ']'
+++ get_running_pod fluentd
+++ oc get pods -l component=fluentd
+++ awk -v sel=fluentd '$1 ~ sel && $3 == "Running" {print $1}'
SKIPPING reinstall test for now
++ curpod=logging-fluentd-n4khx
++ '[' 110 -gt 0 ']'
++ '[' start = stop ']'
++ '[' start = start ']'
++ '[' -z logging-fluentd-n4khx ']'
++ break
++ '[' 110 -le 0 ']'
++ return 0
/data/src/github.com/openshift/origin-aggregated-logging/hack/lib/log/system.sh: line 31:  4015 Terminated              sar -A -o "${binary_logfile}" 1 86400 > /dev/null 2> "${stderr_logfile}"  (wd: /data/src/github.com/openshift/origin-aggregated-logging)
[INFO] [CLEANUP] Beginning cleanup routines...
[INFO] [CLEANUP] Dumping cluster events to /tmp/origin-aggregated-logging/artifacts/events.txt
[INFO] [CLEANUP] Dumping etcd contents to /tmp/origin-aggregated-logging/artifacts/etcd
[WARNING] No compiled `etcdhelper` binary was found. Attempting to build one using:
[WARNING]   $ hack/build-go.sh tools/etcdhelper
++ Building go targets for linux/amd64: tools/etcdhelper
/data/src/github.com/openshift/origin-aggregated-logging/../origin/hack/build-go.sh took 185 seconds
[INFO] [CLEANUP] Dumping container logs to /tmp/origin-aggregated-logging/logs/containers
[INFO] [CLEANUP] Truncating log files over 200M
[INFO] [CLEANUP] Stopping docker containers
[INFO] [CLEANUP] Removing docker containers
Error response from daemon: You cannot remove a running container bea3bf43f8bc3141ef3031dde833cd1e6f708dcabe1664718237b3f6ef4e104c. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 9952c570c4f06b79cadb37bf178af60e7df3acef1b3f236ca1a25f2fc0df2332. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 5f8edc5f3918e616e4861c9c0d55ef7769de959f5bd7453d9eff91525e018974. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container fc510ffd2142604e7563b613e3a3cd4d16cf14ff49733278c3c2c4137c61c868. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 384993f64490d228889f42a2cb840051096ee943430c7867a87feee8a1532f4e. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 82a4773aa4285b0a475bb5a4ba26b36521c4d36a40e9f6424aea0baeeb55c135. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container a966aa20a60f290958428a247fba324c1b74f8632ea33c1af890d2440b013946. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 38f142c997dca472c96af732b2cbc678be7999106270825a7f43304afcb75249. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 7c2f49787b33a07f616c2688432dd511839168c5d0d86130438c06afb738b53c. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 33ccd98cbc765603140f7a718d2aee1de1c1184583972a31ef5409b51f8fa218. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container aba6562b23d64c26a528d22249487073156a75bee337020f46f69b05e8ae436f. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container c086cc8d8315ce225980791113c9840d4e796c40cc5a724cf94f0a5f5ff43e11. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container b64c8da1d82a1e394f4200acc13db108c86d46313476f325f7ebf78fd59aca62. Stop the container before attempting removal or use -f
[INFO] [CLEANUP] Killing child processes
[INFO] [CLEANUP] Pruning etcd data directory
[INFO] /data/src/github.com/openshift/origin-aggregated-logging/logging.sh exited with code 0 after 01h 12m 12s
/data/src/github.com/openshift/origin-aggregated-logging
+ popd
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: RUN LOGGING TESTS [01h 12m 13s] ##########
[PostBuildScript] - Executing post-build scripts.
[workspace] $ /bin/bash /tmp/hudson8952776583815244279.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
+ rm -rf /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
+ mkdir /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo stat /data/src/github.com/openshift/origin/_output/scripts
  File: ‘/data/src/github.com/openshift/origin/_output/scripts’
  Size: 59        	Blocks: 0          IO Block: 4096   directory
Device: ca02h/51714d	Inode: 51274771    Links: 5
Access: (2755/drwxr-sr-x)  Uid: ( 1002/  origin)   Gid: ( 1004/origin-git)
Context: unconfined_u:object_r:default_t:s0
Access: 2017-06-09 11:03:44.022596023 -0400
Modify: 2017-06-09 11:26:57.925513166 -0400
Change: 2017-06-09 11:26:57.925513166 -0400
 Birth: -
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod -R o+rX /data/src/github.com/openshift/origin/_output/scripts
+ scp -r -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel:/data/src/github.com/openshift/origin/_output/scripts /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo stat /data/src/github.com/openshift/origin-aggregated-logging/_output/scripts
  File: ‘/data/src/github.com/openshift/origin-aggregated-logging/_output/scripts’
  Size: 38        	Blocks: 0          IO Block: 4096   directory
Device: ca02h/51714d	Inode: 76023304    Links: 3
Access: (2755/drwxr-sr-x)  Uid: ( 1002/  origin)   Gid: ( 1004/origin-git)
Context: unconfined_u:object_r:default_t:s0
Access: 2017-06-09 12:10:50.523539500 -0400
Modify: 2017-06-09 12:08:26.482559971 -0400
Change: 2017-06-09 12:08:26.482559971 -0400
 Birth: -
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod -R o+rX /data/src/github.com/openshift/origin-aggregated-logging/_output/scripts
+ scp -r -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel:/data/src/github.com/openshift/origin-aggregated-logging/_output/scripts /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
+ tree /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts
└── scripts
    ├── build-base-images
    │   ├── artifacts
    │   ├── logs
    │   └── openshift.local.home
    ├── build-images
    │   ├── artifacts
    │   ├── logs
    │   └── openshift.local.home
    ├── env
    │   ├── artifacts
    │   ├── logs
    │   │   └── scripts.log
    │   └── openshift.local.home
    └── origin-aggregated-logging
        └── openshift.local.home

15 directories, 1 file
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST [00h 00m 02s] ##########
[workspace] $ /bin/bash /tmp/hudson1004998142437003659.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/generated
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -h && sudo vgs && sudo lvs 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m avc 2>&1'
+ true
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/generated
/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/generated
├── avc_denials.log
├── docker.info
├── filesystem.info
├── installed_packages.log
└── pid1.journal

0 directories, 5 files
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: GENERATE ARTIFACTS FROM THE REMOTE HOST [00h 00m 09s] ##########
[workspace] $ /bin/bash /tmp/hudson5433685167769348358.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/journals
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit origin-master.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit origin-node.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit openvswitch.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit ovs-vswitchd.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit ovsdb-server.service --no-pager --all --lines=all
+ ssh -F ./.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit etcd.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/journals
/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/artifacts/journals
├── docker.service
├── etcd.service
├── openvswitch.service
├── origin-master.service
├── origin-node.service
├── ovsdb-server.service
└── ovs-vswitchd.service

0 directories, 7 files
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST [00h 00m 03s] ##########
[workspace] $ /bin/bash /tmp/hudson9156375445730815424.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate ]]
+ source /var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890
++ export PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config
+ oct deprovision

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 13:20:46.512021", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 13:20:46.517632", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2017-06-09 13:20:47.388315", 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 13:20:48.024249", 
    "msg": "Tags {'Name': 'terminate'} created for resource i-0c2f17a3c842910cc."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 13:20:48.972807", 
    "instance_ids": [
        "i-0c2f17a3c842910cc"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0c35e37cab2fd5b4f"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-05555f2f10ac24a54"
                }
            }, 
            "dns_name": "ec2-34-207-254-240.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-0c2f17a3c842910cc", 
            "image_id": "ami-2494ca32", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2017-06-09T16:03:07.000Z", 
            "placement": "us-east-1d", 
            "private_dns_name": "ip-172-18-7-3.ec2.internal", 
            "private_ip": "172.18.7.3", 
            "public_dns_name": "ec2-34-207-254-240.compute-1.amazonaws.com", 
            "public_ip": "34.207.254.240", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:21
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 13:20:49.235977", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory/host_vars/172.18.7.3.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] *********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/9aea3b4f81e266b026e21975a3a6a5a1cfddd890/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2017-06-09 13:20:49.749412", 
    "path": "/var/lib/jenkins/jobs/test_branch_origin_aggregated_logging_prior/workspace/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
Recording test results
Finished: SUCCESS