Failed: Console Output

Skipping 330 KB of log output.
                   {
                                    "name": "kibana-proxy", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-kibana-proxy"
                                    }
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T14:39:01Z", 
                            "lastUpdateTime": "2017-06-09T14:39:01Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T14:39:02Z", 
                            "lastUpdateTime": "2017-06-09T14:39:02Z", 
                            "message": "replication controller \"logging-kibana-ops-1\" is waiting for pod \"logging-kibana-ops-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
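
The conditions above only show that the first rollout has not finished yet: the replication controller is still waiting for its deployer pod. A sketch of commands to watch or debug this, assuming the standard oc client, with the pod and deployment config names taken from the status message:

    # follow the first rollout of the ops Kibana deployment config
    oc rollout status dc/logging-kibana-ops -n logging
    # inspect the deployer pod the replication controller is waiting for
    oc describe pod logging-kibana-ops-1-deploy -n logging
    oc logs logging-kibana-ops-1-deploy -n logging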

TASK [openshift_logging_kibana : Delete temp directory] ************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_kibana/tasks/main.yaml:252
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-aMED3y", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:195
statically included: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "curator_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create temp directory for doing work in] *****
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:5
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.009591", 
    "end": "2017-06-09 10:39:04.206757", 
    "rc": 0, 
    "start": "2017-06-09 10:39:04.197166"
}

STDOUT:

/tmp/openshift-logging-ansible-iHgrng

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:10
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-iHgrng"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Create templates subdirectory] ***************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:14
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-iHgrng/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:24
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:32
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-curator-dockercfg-kmsqd"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:05Z", 
                    "name": "aggregated-logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1504", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-curator", 
                    "uid": "633a2289-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-curator-token-mdrm0"
                    }, 
                    {
                        "name": "aggregated-logging-curator-dockercfg-kmsqd"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
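
The module output above reflects a create-then-read-back pattern: the recorded command is the final oc get of the service account it just ensured. A minimal manual equivalent, assuming the standard oc client:

    # create the service account only if it does not already exist, then inspect it
    oc get sa aggregated-logging-curator -n logging 2>/dev/null || \
        oc create serviceaccount aggregated-logging-curator -n logging
    oc describe sa aggregated-logging-curator -n logging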

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "checksum": "9008efd9a8892dcc42c28c6dfb6708527880a6d8", 
    "dest": "/tmp/openshift-logging-ansible-iHgrng/curator.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "5498c5fd98f3dd06e34b20eb1f55dc12", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 320, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019145.67-36923368276207/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:47
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Set Curator configmap] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:53
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "config.yaml": "# Logging example curator config file\n\n# uncomment and use this to override the defaults from env vars\n#.defaults:\n#  delete:\n#    days: 30\n#  runhour: 0\n#  runminute: 0\n\n# to keep ops logs for a different duration:\n#.operations:\n#  delete:\n#    weeks: 8\n\n# example for a normal project\n#myapp:\n#  delete:\n#    weeks: 1\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:06Z", 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1520", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-curator", 
                    "uid": "6411aa37-4d21-11e7-83b0-0e6fb895db82"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
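
The config.yaml stored in this configmap is entirely commented out, so retention is driven by the CURATOR_* environment variables in the deployment config. A hedged sketch of one way to override retention for a single project, mirroring the commented myapp example in the data above (the project name and values are placeholders):

    # keep logs of project "myapp" for one week (example values only)
    printf 'myapp:\n  delete:\n    weeks: 1\n' > config.yaml
    # regenerate the configmap and trigger a new curator deployment
    oc create configmap logging-curator --from-file=config.yaml \
        --dry-run -o yaml | oc replace -n logging -f -
    oc rollout latest dc/logging-curator -n logging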

TASK [openshift_logging_curator : Set Curator secret] **************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:62
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-curator ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.curator.key cert=/etc/origin/logging/system.logging.curator.crt -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}
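
Newer oc clients deprecate the oc secrets new form used above; the same secret can be built with oc create secret generic and the certificate paths from the recorded command:

    oc create secret generic logging-curator -n logging \
        --from-file=ca=/etc/origin/logging/ca.crt \
        --from-file=key=/etc/origin/logging/system.logging.curator.key \
        --from-file=cert=/etc/origin/logging/system.logging.curator.crt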

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:75
ok: [openshift] => {
    "ansible_facts": {
        "curator_component": "curator", 
        "curator_name": "logging-curator"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Generate Curator deploymentconfig] ***********
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "674dfe0c6e9c0d9b4e3c74b9072d086f7985adde", 
    "dest": "/tmp/openshift-logging-ansible-iHgrng/templates/curator-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "c6ded9fd24af49942a05ed8e78bf3968", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 2339, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019147.98-54781495807966/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : Set Curator DC] ******************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:99
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:08Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "curator", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1540", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-curator", 
                    "uid": "6570c974-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "curator", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "curator", 
                                "logging-infra": "curator", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-curator"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/curator/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/curator/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/curator/keys/ca"
                                        }, 
                                        {
                                            "name": "CURATOR_DEFAULT_DAYS", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_HOUR", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_MINUTE", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_TIMEZONE", 
                                            "value": "UTC"
                                        }, 
                                        {
                                            "name": "CURATOR_SCRIPT_LOG_LEVEL", 
                                            "value": "INFO"
                                        }, 
                                        {
                                            "name": "CURATOR_LOG_LEVEL", 
                                            "value": "ERROR"
                                        }
                                    ], 
                                    "image": "172.30.224.2:5000/logging/logging-curator:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "curator", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/curator/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/curator/settings", 
                                            "name": "config", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-curator", 
                            "serviceAccountName": "aggregated-logging-curator", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-curator"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-curator"
                                    }, 
                                    "name": "config"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T14:39:08Z", 
                            "lastUpdateTime": "2017-06-09T14:39:08Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T14:39:09Z", 
                            "lastUpdateTime": "2017-06-09T14:39:09Z", 
                            "message": "replication controller \"logging-curator-1\" is waiting for pod \"logging-curator-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}
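
As with the Kibana deployment earlier in the log, this status block only means the first rollout is still in progress. A sketch of follow-up checks, assuming the standard oc client and the component label shown in the spec above:

    # wait for the first curator deployment to finish, then check its pod and logs
    oc rollout status dc/logging-curator -n logging
    oc get pods -n logging -l component=curator
    oc logs dc/logging-curator -n logging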

TASK [openshift_logging_curator : Delete temp directory] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:109
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-iHgrng", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:207
statically included: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "curator_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create temp directory for doing work in] *****
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:5
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.004080", 
    "end": "2017-06-09 10:39:11.954336", 
    "rc": 0, 
    "start": "2017-06-09 10:39:11.950256"
}

STDOUT:

/tmp/openshift-logging-ansible-vywoUO

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:10
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-vywoUO"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Create templates subdirectory] ***************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:14
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-vywoUO/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:24
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Create Curator service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:32
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "imagePullSecrets": [
                    {
                        "name": "aggregated-logging-curator-dockercfg-kmsqd"
                    }
                ], 
                "kind": "ServiceAccount", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:05Z", 
                    "name": "aggregated-logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1504", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-curator", 
                    "uid": "633a2289-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-curator-token-mdrm0"
                    }, 
                    {
                        "name": "aggregated-logging-curator-dockercfg-kmsqd"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "checksum": "9008efd9a8892dcc42c28c6dfb6708527880a6d8", 
    "dest": "/tmp/openshift-logging-ansible-vywoUO/curator.yml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "5498c5fd98f3dd06e34b20eb1f55dc12", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 320, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019153.02-200314772942038/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:47
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_curator : Set Curator configmap] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:53
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get configmap logging-curator -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "config.yaml": "# Logging example curator config file\n\n# uncomment and use this to override the defaults from env vars\n#.defaults:\n#  delete:\n#    days: 30\n#  runhour: 0\n#  runminute: 0\n\n# to keep ops logs for a different duration:\n#.operations:\n#  delete:\n#    weeks: 8\n\n# example for a normal project\n#myapp:\n#  delete:\n#    weeks: 1\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:06Z", 
                    "name": "logging-curator", 
                    "namespace": "logging", 
                    "resourceVersion": "1520", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-curator", 
                    "uid": "6411aa37-4d21-11e7-83b0-0e6fb895db82"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_curator : Set Curator secret] **************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:62
ok: [openshift] => {
    "changed": false, 
    "results": {
        "apiVersion": "v1", 
        "data": {
            "ca": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMyakNDQWNLZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUwTXpjME4xb1hEVEl5TURZd09ERTBNemMwT0ZvdwpIakVjTUJvR0ExVUVBeE1UYkc5bloybHVaeTF6YVdkdVpYSXRkR1Z6ZERDQ0FTSXdEUVlKS29aSWh2Y05BUUVCCkJRQURnZ0VQQURDQ0FRb0NnZ0VCQU4vVnhpck9Ub0Z1OUhrSUVtQVMwU0RlbXE0LzF1RSt3dWxoYWVMWFZXVVEKZ05QVSt3ZTlMRGw5bXJNYUN1VUFKall0YUNxdktxbGpLNUtSWVd4dldIK09lRkd0bmZXajN3RllUL0NIRXpIWAozclR3OVBzU1grelRabHlEUXl5ekZ6S29KNm15MGlsU2NwcXdMcEpIMk9tdjE2Sk12VytKck13Q2FmUURESTMvCkZPZWFSdndVcEUxVlFuK0wvRTJGcU02SlRwQ3ZKVTB3TGp4Rkg1b3ZpSDNDdXBFR29sYW1JNHhXaXp5dUR0amUKNnpuTGJPOGJ2a1BFZkk5MHN1TTR0TlpGOVlyd3VHaS9mUkN4U2c3dXc1dUlocU5xQ3RWSUVGSHN5Qng0Vm96eApTbGhSaHJzR1JuRkdDR2lWM3FVemI3aWZPN05jVUFBMzVzQUZWb0pzUUJzQ0F3RUFBYU1qTUNFd0RnWURWUjBQCkFRSC9CQVFEQWdLa01BOEdBMVVkRXdFQi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFISFoKaEtMM0cxdjZPMjhpcHByb2cxT1k3aDFIQWgrUDZzRmdEczlwbER2WnhnaER5SWdPUEt3R0FYZW9JTlkwZ1dSNApDdEJUeDJWWE82b0dxM1Yrc0pNR2ZjaVVBRzlCaDZneGh5UHIwTkY4aDlWcGQzdFJMVW9hNHVRZ0tSdHYrNlRBCnJISzdpVTF5RmhNSTNKSjF3dllaZjdVNWI4dTNrYnphbUl5WnNqMlJWOXo1cVh1U1lhY0gyd3lEWjh3UjJBdDQKRzZDT0VwNGsxMTg0TmpOZlBtTXAybi8xcFllUjJoTWtIUVlCSitzK2sxWTV5QVNJNEdBWDF6Skt6TWJ4eEhmTgpVYk1RcjBXdUdIMkRmRlVubFJwbWJaeGZWdEpZK1ppQXEwVHVyUnhmNnlpUXE3TDNJZ0YzMERWYWR6V2ZpOTdmCjltS2pEb0tOSUVOaG00Y1NVVnc9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 
            "cert": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURSakNDQWk2Z0F3SUJBZ0lCQkRBTkJna3Foa2lHOXcwQkFRVUZBREFlTVJ3d0dnWURWUVFERXhOc2IyZG4KYVc1bkxYTnBaMjVsY2kxMFpYTjBNQjRYRFRFM01EWXdPVEUwTXpjMU5Gb1hEVEU1TURZd09URTBNemMxTkZvdwpSekVRTUE0R0ExVUVDZ3dIVEc5bloybHVaekVTTUJBR0ExVUVDd3dKVDNCbGJsTm9hV1owTVI4d0hRWURWUVFECkRCWnplWE4wWlcwdWJHOW5aMmx1Wnk1amRYSmhkRzl5TUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEEKTUlJQkNnS0NBUUVBdy9qamxTSjB3RTFrZDVTeXQwUXhiNHdsalZwdVYvWmtGZkVjRUR4T014NXVqSmpHeGlpbwpqd0IrUG11Mkx2cmxNbDJWU29IM2VMWUIwc004R2VWV25GS1FNeXh6cUkrZEVUTThjekthK1F5WlpIc2k5dGJnClJCeElVZCtqOStCU3dMdDE0Mm8xWnlsYlQvTFloYiszdXdsRk1NMVE0YlZDb3FFTFBCVi9HUjFySFZxS2dUOW0Kci8vazA3N1ZvNm95T0RSeE5kQW1LQjNWckxCZ243VTFYRTloWkVxUGtTRnQ4UDFqVFNtTzkwd1BsaUQ5MG9RcgpCR3loQktLSFFsb3EvMzRNdmt0VldUcGpoWUordGlhckRrT0dPZUVoM3RKbzhxb2lFNE9QVHRvenpleU00cjNvCmtzeDhXYVFrSU5sVk1QK0F3eThONzRHMldSZE5KQm1rVHdJREFRQUJvMll3WkRBT0JnTlZIUThCQWY4RUJBTUMKQmFBd0NRWURWUjBUQkFJd0FEQWRCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBUVlJS3dZQkJRVUhBd0l3SFFZRApWUjBPQkJZRUZBRHYzWFFLemNMQ2czZzNSTng4dnUwZlVyQUZNQWtHQTFVZEl3UUNNQUF3RFFZSktvWklodmNOCkFRRUZCUUFEZ2dFQkFONnlKZWY5ckFOSXUvRXh4NzFUNnRTMEQ1VTl1VHdqZXZTYW9QRVluWERWbElZbnc5Vi8KamNFakYrN1lKTnpOZkRRMGpETVZBR2ZjSlJScVl2eWh5YllKRitJYkNkSDA1aHhwK2wrN1Bpd1YybU03aXh4TwprcnFnWDk1NjJTOWk2R29pYnZXa3IxUy9rSWxuWkFCRzI4Tmx5T3M0Qm0zY3JrZHJlY1dZTVZPZTFPemRtZTUwCjl1RGFoMHR5dlV4dHlaMHdUaXlRbHVvcGFneDNETHJiMUpCdFBwMGhDM3pYNGlXYWRlU0hTLzhNdERDeWdTVkkKalA4dlBKTVdUQ292RC9mQWxFeSs3TCttV3lyVkFpNHkyQ2NnU0JwNlhEdzBSNVpyZkc0UUZ5M0hhUFpDa2drRgp5ZEcrTnhOM1lRanN3TmtIR0Y2Ky9Qd05kaUowOGxDMHprZz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=", 
            "key": "LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRREQrT09WSW5UQVRXUjMKbExLM1JERnZqQ1dOV201WDltUVY4UndRUEU0ekhtNk1tTWJHS0tpUEFINCthN1l1K3VVeVhaVktnZmQ0dGdIUwp3endaNVZhY1VwQXpMSE9vajUwUk16eHpNcHI1REpsa2V5TDIxdUJFSEVoUjM2UDM0RkxBdTNYamFqVm5LVnRQCjh0aUZ2N2U3Q1VVd3pWRGh0VUtpb1FzOEZYOFpIV3NkV29xQlAyYXYvK1RUdnRXanFqSTROSEUxMENZb0hkV3MKc0dDZnRUVmNUMkZrU28rUklXM3cvV05OS1k3M1RBK1dJUDNTaENzRWJLRUVvb2RDV2lyL2ZneStTMVZaT21PRgpnbjYySnFzT1E0WTU0U0hlMG1qeXFpSVRnNDlPMmpQTjdJeml2ZWlTekh4WnBDUWcyVlV3LzREREx3M3ZnYlpaCkYwMGtHYVJQQWdNQkFBRUNnZ0VBZTgxdUtMYmR2dWFsZzQzaTRUZ3BhdWpFaUdvS3IzTXVnMVlRZm9rNmRielIKNWV4V0ZyVjAxdkplenB4Sk9hQ3l6b0NrWWE5OVlUcktLQlhDa0RGNzU4R1k2MXMzcmRNY1ROTnJhdk1iU0I2WApnUXp4WjdNVGRyUFBWRG5PWWpmS3o0c2R6STg2TVhQRkJkckt3cVA5TkNHRkhuRjJtVUJqV2s0V3hOeG5zTHJ6CjRvWDJlWFB5R0FWQklFdmwzbkVsU2dzREY2NDZzSW5vRVJYT091Q1YxWFNVMzVZTStDTkVMd1UxWiswWkplOVQKdFdHcGhicEtBMFhoWU9KL0diN2dXb056bUk1NWRwalllY3hOM3BTck9ocXpyRHZaMGdBRWVCZzJnSWhIdEJlRwpSbkF1S1FmRGkwdUVDNVMwdnFwQ1Z0QlRjaVp4eWJ3MTFWb2ltVWNkb1FLQmdRRG1ybVlRMStQSWEwRnNIRHNOCjJiVTNPeWJUYTJFV2xMNUR1MlBUdXIrVzBEc1VKZG91ZVFIUGErOHFVaDkwZnVFalNGUEVHTEdNUW9VcEtObEkKRXZDK1ZEVkNMWE1yVkY3ZzFuZTRyZnBUL0ttYzd4b3o0eHdZcHdwVmdKTGp4MTdIbkszQVEwdGQwTVV2c1dyYgpBQWx1dnNnMFI3bVhyeVpycU1veDEwS3g1d0tCZ1FEWmV6K1BEQVFCQnl6TWdNWXB2Z1JYWWs4RUczbmQ3bnlFCk5hWlY5VHFTSVlPZmwzd2NtRUI4em9kc3Jadi9VY1R0MVQ1NUNMWExtZXg4S0VsUW50djgxR3FzeVg3QSs3Uk4KUisvejIxMm91NUZMRG1nUXdraHI2KzE0UVJUQkVjZzBTN2dpM3o4M241ZnVLMzExMzg2cmNFNjFBRnpqMUk2dwpsMzU4a1JSOVdRS0JnQ1hUOVlMUGxGZmFWc2tldkFSaWJoZ1hpQjlsWFc5eGh0M2VqZGs3cDQxWXFrZDhpWkhUCllCdWVqSUs1SXRWY3RSaXZGS1Ywa3pEMys1UXJVYTVEQk4yQk81YVZrMnhJa3FKMktQM004ZDd4OFBKK055TEoKSEdOaWlReEtZRXdmOW9mdlJvTGZ4aTBsUGlDN0RGWExaTUNYWW9kSU44UUxBZGJudi9oNlgwS0RBb0dBSUljSQppSktaZnJYZUEzQTNNME1TVHllSy9laEdIK1Z0aGpDb0FpODlaV0hSMStyR1MxaVNQSmgvTk1mNmp3TmgydGlNCkVYbzJCbyt0Q1J6VmFsTjRQNlpkbGg1RThRUTBnV0tEQnd3R1JCaFpzTW9rMG16Y2ZCZTdXc01SSVpkUlJGRVcKNTFCUW1qY3FlQnJWRnU0MGNQdkpLZFUyRDNWWktoeGVkSzhXUzNFQ2dZQTdjVjVycWNMazFPTERWQWNwUkg4MApJS21hbUxKVXpJbllQMkN1dVJhOHFJMVFwQjcwbjF5cVJKbnBqNDdCVUZXQnpDcWxVNUVzZVp0MnJVZm43dURyCmhnSlBlYWQ5OFBubEM0TXdYdkh6TURhRTZoVHdna1lid0lEYjhpVkhCMnJvNE1OMDNiLzJaUFpVZTB0VXB6WE0KUmNMV0ozMU5VN2xaUlhtT05xSHJrZz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K"
        }, 
        "kind": "Secret", 
        "metadata": {
            "creationTimestamp": null, 
            "name": "logging-curator"
        }, 
        "type": "Opaque"
    }, 
    "state": "present"
}
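
The secret data above is base64-encoded PEM material. A sketch for pulling out and inspecting the client certificate, assuming the standard oc, base64, and openssl tools:

    oc get secret logging-curator -n logging -o jsonpath='{.data.cert}' \
        | base64 -d | openssl x509 -noout -subject -enddate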

TASK [openshift_logging_curator : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:75
ok: [openshift] => {
    "ansible_facts": {
        "curator_component": "curator-ops", 
        "curator_name": "logging-curator-ops"
    }, 
    "changed": false
}

TASK [openshift_logging_curator : Generate Curator deploymentconfig] ***********
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:81
ok: [openshift] => {
    "changed": false, 
    "checksum": "704ab874d5e53eb04fb70ac51b0b9d90cfedd08f", 
    "dest": "/tmp/openshift-logging-ansible-vywoUO/templates/curator-dc.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "d5ffd5de466b7f6cdb2903f417adf34f", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 2363, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019154.7-81526706715204/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_curator : Set Curator DC] ******************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:99
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get dc logging-curator-ops -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "DeploymentConfig", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:15Z", 
                    "generation": 2, 
                    "labels": {
                        "component": "curator-ops", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-curator-ops", 
                    "namespace": "logging", 
                    "resourceVersion": "1586", 
                    "selfLink": "/oapi/v1/namespaces/logging/deploymentconfigs/logging-curator-ops", 
                    "uid": "696d60a3-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "spec": {
                    "replicas": 1, 
                    "selector": {
                        "component": "curator-ops", 
                        "logging-infra": "curator", 
                        "provider": "openshift"
                    }, 
                    "strategy": {
                        "activeDeadlineSeconds": 21600, 
                        "recreateParams": {
                            "timeoutSeconds": 600
                        }, 
                        "resources": {}, 
                        "rollingParams": {
                            "intervalSeconds": 1, 
                            "maxSurge": "25%", 
                            "maxUnavailable": "25%", 
                            "timeoutSeconds": 600, 
                            "updatePeriodSeconds": 1
                        }, 
                        "type": "Recreate"
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "curator-ops", 
                                "logging-infra": "curator", 
                                "provider": "openshift"
                            }, 
                            "name": "logging-curator-ops"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/curator/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/curator/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/curator/keys/ca"
                                        }, 
                                        {
                                            "name": "CURATOR_DEFAULT_DAYS", 
                                            "value": "30"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_HOUR", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_MINUTE", 
                                            "value": "0"
                                        }, 
                                        {
                                            "name": "CURATOR_RUN_TIMEZONE", 
                                            "value": "UTC"
                                        }, 
                                        {
                                            "name": "CURATOR_SCRIPT_LOG_LEVEL", 
                                            "value": "INFO"
                                        }, 
                                        {
                                            "name": "CURATOR_LOG_LEVEL", 
                                            "value": "ERROR"
                                        }
                                    ], 
                                    "image": "172.30.224.2:5000/logging/logging-curator:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "curator", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m"
                                        }
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/etc/curator/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/curator/settings", 
                                            "name": "config", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-curator", 
                            "serviceAccountName": "aggregated-logging-curator", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-curator"
                                    }
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-curator"
                                    }, 
                                    "name": "config"
                                }
                            ]
                        }
                    }, 
                    "test": false, 
                    "triggers": [
                        {
                            "type": "ConfigChange"
                        }
                    ]
                }, 
                "status": {
                    "availableReplicas": 0, 
                    "conditions": [
                        {
                            "lastTransitionTime": "2017-06-09T14:39:15Z", 
                            "lastUpdateTime": "2017-06-09T14:39:15Z", 
                            "message": "Deployment config does not have minimum availability.", 
                            "status": "False", 
                            "type": "Available"
                        }, 
                        {
                            "lastTransitionTime": "2017-06-09T14:39:15Z", 
                            "lastUpdateTime": "2017-06-09T14:39:15Z", 
                            "message": "replication controller \"logging-curator-ops-1\" is waiting for pod \"logging-curator-ops-1-deploy\" to run", 
                            "status": "Unknown", 
                            "type": "Progressing"
                        }
                    ], 
                    "details": {
                        "causes": [
                            {
                                "type": "ConfigChange"
                            }
                        ], 
                        "message": "config change"
                    }, 
                    "latestVersion": 1, 
                    "observedGeneration": 2, 
                    "replicas": 0, 
                    "unavailableReplicas": 0, 
                    "updatedReplicas": 0
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_curator : Delete temp directory] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:109
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-vywoUO", 
    "state": "absent"
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:226
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:241
statically included: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:2
 [WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_fluentd_nodeselector.keys()
| count }} > 1
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:6
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:10
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:14
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:3
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:7
ok: [openshift] => {
    "ansible_facts": {
        "fluentd_version": "3_5"
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:12
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:15
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:20
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:26
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Create temp directory for doing work in] *****
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:33
ok: [openshift] => {
    "changed": false, 
    "cmd": [
        "mktemp", 
        "-d", 
        "/tmp/openshift-logging-ansible-XXXXXX"
    ], 
    "delta": "0:00:00.002006", 
    "end": "2017-06-09 10:39:19.619753", 
    "rc": 0, 
    "start": "2017-06-09 10:39:19.617747"
}

STDOUT:

/tmp/openshift-logging-ansible-hFvrLb

TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:38
ok: [openshift] => {
    "ansible_facts": {
        "tempdir": "/tmp/openshift-logging-ansible-hFvrLb"
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : Create templates subdirectory] ***************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:41
ok: [openshift] => {
    "changed": false, 
    "gid": 0, 
    "group": "root", 
    "mode": "0755", 
    "owner": "root", 
    "path": "/tmp/openshift-logging-ansible-hFvrLb/templates", 
    "secontext": "unconfined_u:object_r:user_tmp_t:s0", 
    "size": 6, 
    "state": "directory", 
    "uid": 0
}

TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:51
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:59
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get sa aggregated-logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "kind": "ServiceAccount", 
                "metadata": {
                    "annotations": {
                        "openshift.io/create-dockercfg-secrets.pending-token": "aggregated-logging-fluentd-token-6sxnn"
                    }, 
                    "creationTimestamp": "2017-06-09T14:39:20Z", 
                    "name": "aggregated-logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1624", 
                    "selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-fluentd", 
                    "uid": "6c61f6fc-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "secrets": [
                    {
                        "name": "aggregated-logging-fluentd-token-3n7cl"
                    }
                ]
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_fluentd : Set privileged permissions for Fluentd] ******
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:68
changed: [openshift] => {
    "changed": true, 
    "present": "present", 
    "results": {
        "cmd": "/bin/oc adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd -n logging", 
        "results": "", 
        "returncode": 0
    }
}

TASK [openshift_logging_fluentd : Set cluster-reader permissions for Fluentd] ***
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:77
changed: [openshift] => {
    "changed": true, 
    "present": "present", 
    "results": {
        "cmd": "/bin/oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd -n logging", 
        "results": "", 
        "returncode": 0
    }
}
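
The two tasks above grant the fluentd service account the privileged SCC and the cluster-reader role. A sketch of how those grants can be verified or later revoked with the same oc adm policy tooling the role uses:

    # the service account should now appear in the privileged SCC's users list
    oc get scc privileged -o yaml | grep aggregated-logging-fluentd
    # revoke the grants again if fluentd is removed
    oc adm policy remove-scc-from-user privileged \
        system:serviceaccount:logging:aggregated-logging-fluentd
    oc adm policy remove-cluster-role-from-user cluster-reader \
        system:serviceaccount:logging:aggregated-logging-fluentd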

TASK [openshift_logging_fluentd : template] ************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:86
ok: [openshift] => {
    "changed": false, 
    "checksum": "a8c8596f5fc2c5dd7c8d33d244af17a2555be086", 
    "dest": "/tmp/openshift-logging-ansible-hFvrLb/fluent.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "579698b48ffce6276ee0e8d5ac71a338", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 1301, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019162.78-163081589611217/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:94
ok: [openshift] => {
    "changed": false, 
    "checksum": "b3e75eddc4a0765edc77da092384c0c6f95440e1", 
    "dest": "/tmp/openshift-logging-ansible-hFvrLb/fluentd-throttle-config.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "25871b8e0a9bedc166a6029872a6c336", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 133, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019163.2-113128984754267/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:100
ok: [openshift] => {
    "changed": false, 
    "checksum": "a3aa36da13f3108aa4ad5b98d4866007b44e9798", 
    "dest": "/tmp/openshift-logging-ansible-hFvrLb/secure-forward.conf", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "1084b00c427f4fa48dfc66d6ad6555d4", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 563, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019163.5-246373269277544/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:107
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:113
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:119
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging_fluentd : Set Fluentd configmap] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:125
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get configmap logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "v1", 
                "data": {
                    "fluent.conf": "# This file is the fluentd configuration entrypoint. Edit with care.\n\n@include configs.d/openshift/system.conf\n\n# In each section below, pre- and post- includes don't include anything initially;\n# they exist to enable future additions to openshift conf as needed.\n\n## sources\n## ordered so that syslog always runs last...\n@include configs.d/openshift/input-pre-*.conf\n@include configs.d/dynamic/input-docker-*.conf\n@include configs.d/dynamic/input-syslog-*.conf\n@include configs.d/openshift/input-post-*.conf\n##\n\n<label @INGRESS>\n## filters\n  @include configs.d/openshift/filter-pre-*.conf\n  @include configs.d/openshift/filter-retag-journal.conf\n  @include configs.d/openshift/filter-k8s-meta.conf\n  @include configs.d/openshift/filter-kibana-transform.conf\n  @include configs.d/openshift/filter-k8s-flatten-hash.conf\n  @include configs.d/openshift/filter-k8s-record-transform.conf\n  @include configs.d/openshift/filter-syslog-record-transform.conf\n  @include configs.d/openshift/filter-viaq-data-model.conf\n  @include configs.d/openshift/filter-post-*.conf\n##\n\n## matches\n  @include configs.d/openshift/output-pre-*.conf\n  @include configs.d/openshift/output-operations.conf\n  @include configs.d/openshift/output-applications.conf\n  # no post - applications.conf matches everything left\n##\n</label>\n", 
                    "secure-forward.conf": "# @type secure_forward\n\n# self_hostname ${HOSTNAME}\n# shared_key <SECRET_STRING>\n\n# secure yes\n# enable_strict_verification yes\n\n# ca_cert_path /etc/fluent/keys/your_ca_cert\n# ca_private_key_path /etc/fluent/keys/your_private_key\n  # for private CA secret key\n# ca_private_key_passphrase passphrase\n\n# <server>\n  # or IP\n#   host server.fqdn.example.com\n#   port 24284\n# </server>\n# <server>\n  # ip address to connect\n#   host 203.0.113.8\n  # specify hostlabel for FQDN verification if ipaddress is used for host\n#   hostlabel server.fqdn.example.com\n# </server>\n", 
                    "throttle-config.yaml": "# Logging example fluentd throttling config file\n\n#example-project:\n#  read_lines_limit: 10\n#\n#.operations:\n#  read_lines_limit: 100\n"
                }, 
                "kind": "ConfigMap", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:24Z", 
                    "name": "logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1651", 
                    "selfLink": "/api/v1/namespaces/logging/configmaps/logging-fluentd", 
                    "uid": "6ead881a-4d21-11e7-83b0-0e6fb895db82"
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_fluentd : Set logging-fluentd secret] ******************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:137
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc secrets new logging-fluentd ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.fluentd.key cert=/etc/origin/logging/system.logging.fluentd.crt -n logging", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_fluentd : Generate logging-fluentd daemonset definition] ***
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:154
ok: [openshift] => {
    "changed": false, 
    "checksum": "51309307ea0991a129258ea566d883d9b663beb2", 
    "dest": "/tmp/openshift-logging-ansible-hFvrLb/templates/logging-fluentd.yaml", 
    "gid": 0, 
    "group": "root", 
    "md5sum": "dc3385ee3b7b11ac7dad4a71b34cbdb3", 
    "mode": "0644", 
    "owner": "root", 
    "secontext": "unconfined_u:object_r:admin_home_t:s0", 
    "size": 3413, 
    "src": "/root/.ansible/tmp/ansible-tmp-1497019165.39-234714267889408/source", 
    "state": "file", 
    "uid": 0
}

TASK [openshift_logging_fluentd : Set logging-fluentd daemonset] ***************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:172
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc get daemonset logging-fluentd -o json -n logging", 
        "results": [
            {
                "apiVersion": "extensions/v1beta1", 
                "kind": "DaemonSet", 
                "metadata": {
                    "creationTimestamp": "2017-06-09T14:39:26Z", 
                    "generation": 1, 
                    "labels": {
                        "component": "fluentd", 
                        "logging-infra": "fluentd", 
                        "provider": "openshift"
                    }, 
                    "name": "logging-fluentd", 
                    "namespace": "logging", 
                    "resourceVersion": "1654", 
                    "selfLink": "/apis/extensions/v1beta1/namespaces/logging/daemonsets/logging-fluentd", 
                    "uid": "6fc4234a-4d21-11e7-83b0-0e6fb895db82"
                }, 
                "spec": {
                    "selector": {
                        "matchLabels": {
                            "component": "fluentd", 
                            "provider": "openshift"
                        }
                    }, 
                    "template": {
                        "metadata": {
                            "creationTimestamp": null, 
                            "labels": {
                                "component": "fluentd", 
                                "logging-infra": "fluentd", 
                                "provider": "openshift"
                            }, 
                            "name": "fluentd-elasticsearch"
                        }, 
                        "spec": {
                            "containers": [
                                {
                                    "env": [
                                        {
                                            "name": "K8S_HOST_URL", 
                                            "value": "https://kubernetes.default.svc.cluster.local"
                                        }, 
                                        {
                                            "name": "ES_HOST", 
                                            "value": "logging-es"
                                        }, 
                                        {
                                            "name": "ES_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_CERT", 
                                            "value": "/etc/fluent/keys/cert"
                                        }, 
                                        {
                                            "name": "ES_CLIENT_KEY", 
                                            "value": "/etc/fluent/keys/key"
                                        }, 
                                        {
                                            "name": "ES_CA", 
                                            "value": "/etc/fluent/keys/ca"
                                        }, 
                                        {
                                            "name": "OPS_HOST", 
                                            "value": "logging-es-ops"
                                        }, 
                                        {
                                            "name": "OPS_PORT", 
                                            "value": "9200"
                                        }, 
                                        {
                                            "name": "OPS_CLIENT_CERT", 
                                            "value": "/etc/fluent/keys/cert"
                                        }, 
                                        {
                                            "name": "OPS_CLIENT_KEY", 
                                            "value": "/etc/fluent/keys/key"
                                        }, 
                                        {
                                            "name": "OPS_CA", 
                                            "value": "/etc/fluent/keys/ca"
                                        }, 
                                        {
                                            "name": "ES_COPY", 
                                            "value": "false"
                                        }, 
                                        {
                                            "name": "USE_JOURNAL", 
                                            "value": "true"
                                        }, 
                                        {
                                            "name": "JOURNAL_SOURCE"
                                        }, 
                                        {
                                            "name": "JOURNAL_READ_FROM_HEAD", 
                                            "value": "false"
                                        }
                                    ], 
                                    "image": "172.30.224.2:5000/logging/logging-fluentd:latest", 
                                    "imagePullPolicy": "Always", 
                                    "name": "fluentd-elasticsearch", 
                                    "resources": {
                                        "limits": {
                                            "cpu": "100m", 
                                            "memory": "512Mi"
                                        }
                                    }, 
                                    "securityContext": {
                                        "privileged": true
                                    }, 
                                    "terminationMessagePath": "/dev/termination-log", 
                                    "terminationMessagePolicy": "File", 
                                    "volumeMounts": [
                                        {
                                            "mountPath": "/run/log/journal", 
                                            "name": "runlogjournal"
                                        }, 
                                        {
                                            "mountPath": "/var/log", 
                                            "name": "varlog"
                                        }, 
                                        {
                                            "mountPath": "/var/lib/docker/containers", 
                                            "name": "varlibdockercontainers", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/fluent/configs.d/user", 
                                            "name": "config", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/fluent/keys", 
                                            "name": "certs", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/docker-hostname", 
                                            "name": "dockerhostname", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/localtime", 
                                            "name": "localtime", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/sysconfig/docker", 
                                            "name": "dockercfg", 
                                            "readOnly": true
                                        }, 
                                        {
                                            "mountPath": "/etc/docker", 
                                            "name": "dockerdaemoncfg", 
                                            "readOnly": true
                                        }
                                    ]
                                }
                            ], 
                            "dnsPolicy": "ClusterFirst", 
                            "nodeSelector": {
                                "logging-infra-fluentd": "true"
                            }, 
                            "restartPolicy": "Always", 
                            "schedulerName": "default-scheduler", 
                            "securityContext": {}, 
                            "serviceAccount": "aggregated-logging-fluentd", 
                            "serviceAccountName": "aggregated-logging-fluentd", 
                            "terminationGracePeriodSeconds": 30, 
                            "volumes": [
                                {
                                    "hostPath": {
                                        "path": "/run/log/journal"
                                    }, 
                                    "name": "runlogjournal"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/var/log"
                                    }, 
                                    "name": "varlog"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/var/lib/docker/containers"
                                    }, 
                                    "name": "varlibdockercontainers"
                                }, 
                                {
                                    "configMap": {
                                        "defaultMode": 420, 
                                        "name": "logging-fluentd"
                                    }, 
                                    "name": "config"
                                }, 
                                {
                                    "name": "certs", 
                                    "secret": {
                                        "defaultMode": 420, 
                                        "secretName": "logging-fluentd"
                                    }
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/hostname"
                                    }, 
                                    "name": "dockerhostname"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/localtime"
                                    }, 
                                    "name": "localtime"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/sysconfig/docker"
                                    }, 
                                    "name": "dockercfg"
                                }, 
                                {
                                    "hostPath": {
                                        "path": "/etc/docker"
                                    }, 
                                    "name": "dockerdaemoncfg"
                                }
                            ]
                        }
                    }, 
                    "templateGeneration": 1, 
                    "updateStrategy": {
                        "rollingUpdate": {
                            "maxUnavailable": 1
                        }, 
                        "type": "RollingUpdate"
                    }
                }, 
                "status": {
                    "currentNumberScheduled": 0, 
                    "desiredNumberScheduled": 0, 
                    "numberMisscheduled": 0, 
                    "numberReady": 0, 
                    "observedGeneration": 1
                }
            }
        ], 
        "returncode": 0
    }, 
    "state": "present"
}

TASK [openshift_logging_fluentd : Retrieve list of Fluentd hosts] **************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:183
ok: [openshift] => {
    "changed": false, 
    "results": {
        "cmd": "/bin/oc get node -o json -n default", 
        "results": [
            {
                "apiVersion": "v1", 
                "items": [
                    {
                        "apiVersion": "v1", 
                        "kind": "Node", 
                        "metadata": {
                            "annotations": {
                                "volumes.kubernetes.io/controller-managed-attach-detach": "true"
                            }, 
                            "creationTimestamp": "2017-06-09T14:23:20Z", 
                            "labels": {
                                "beta.kubernetes.io/arch": "amd64", 
                                "beta.kubernetes.io/os": "linux", 
                                "kubernetes.io/hostname": "172.18.4.93"
                            }, 
                            "name": "172.18.4.93", 
                            "namespace": "", 
                            "resourceVersion": "1628", 
                            "selfLink": "/api/v1/nodes/172.18.4.93", 
                            "uid": "30393f5f-4d1f-11e7-83b0-0e6fb895db82"
                        }, 
                        "spec": {
                            "externalID": "172.18.4.93", 
                            "providerID": "aws:////i-08eae8de52d2e283e"
                        }, 
                        "status": {
                            "addresses": [
                                {
                                    "address": "172.18.4.93", 
                                    "type": "LegacyHostIP"
                                }, 
                                {
                                    "address": "172.18.4.93", 
                                    "type": "InternalIP"
                                }, 
                                {
                                    "address": "172.18.4.93", 
                                    "type": "Hostname"
                                }
                            ], 
                            "allocatable": {
                                "cpu": "4", 
                                "memory": "7129288Ki", 
                                "pods": "40"
                            }, 
                            "capacity": {
                                "cpu": "4", 
                                "memory": "7231688Ki", 
                                "pods": "40"
                            }, 
                            "conditions": [
                                {
                                    "lastHeartbeatTime": "2017-06-09T14:39:21Z", 
                                    "lastTransitionTime": "2017-06-09T14:23:20Z", 
                                    "message": "kubelet has sufficient disk space available", 
                                    "reason": "KubeletHasSufficientDisk", 
                                    "status": "False", 
                                    "type": "OutOfDisk"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T14:39:21Z", 
                                    "lastTransitionTime": "2017-06-09T14:23:20Z", 
                                    "message": "kubelet has sufficient memory available", 
                                    "reason": "KubeletHasSufficientMemory", 
                                    "status": "False", 
                                    "type": "MemoryPressure"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T14:39:21Z", 
                                    "lastTransitionTime": "2017-06-09T14:23:20Z", 
                                    "message": "kubelet has no disk pressure", 
                                    "reason": "KubeletHasNoDiskPressure", 
                                    "status": "False", 
                                    "type": "DiskPressure"
                                }, 
                                {
                                    "lastHeartbeatTime": "2017-06-09T14:39:21Z", 
                                    "lastTransitionTime": "2017-06-09T14:23:20Z", 
                                    "message": "kubelet is posting ready status", 
                                    "reason": "KubeletReady", 
                                    "status": "True", 
                                    "type": "Ready"
                                }
                            ], 
                            "daemonEndpoints": {
                                "kubeletEndpoint": {
                                    "Port": 10250
                                }
                            }, 
                            "images": [
                                {
                                    "names": [
                                        "openshift/origin-federation:6acabdc", 
                                        "openshift/origin-federation:latest"
                                    ], 
                                    "sizeBytes": 1205885664
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-docker-registry@sha256:0601ffd0ff2b7258926bde100b285cf824e012438e15e1ad808ea5e3bbdecc12", 
                                        "docker.io/openshift/origin-docker-registry:latest"
                                    ], 
                                    "sizeBytes": 1100570695
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-docker-registry:6acabdc", 
                                        "openshift/origin-docker-registry:latest"
                                    ], 
                                    "sizeBytes": 1100164272
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-gitserver:6acabdc", 
                                        "openshift/origin-gitserver:latest"
                                    ], 
                                    "sizeBytes": 1086520226
                                }, 
                                {
                                    "names": [
                                        "openshift/node:6acabdc", 
                                        "openshift/node:latest"
                                    ], 
                                    "sizeBytes": 1051721928
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-keepalived-ipfailover:6acabdc", 
                                        "openshift/origin-keepalived-ipfailover:latest"
                                    ], 
                                    "sizeBytes": 1028529711
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-haproxy-router:latest"
                                    ], 
                                    "sizeBytes": 1022758742
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-docker-builder:6acabdc", 
                                        "openshift/origin-docker-builder:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-deployer:6acabdc", 
                                        "openshift/origin-deployer:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-f5-router:6acabdc", 
                                        "openshift/origin-f5-router:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin:6acabdc", 
                                        "openshift/origin:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-sti-builder:6acabdc", 
                                        "openshift/origin-sti-builder:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-recycler:6acabdc", 
                                        "openshift/origin-recycler:latest"
                                    ], 
                                    "sizeBytes": 1001728427
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-cluster-capacity:6acabdc", 
                                        "openshift/origin-cluster-capacity:latest"
                                    ], 
                                    "sizeBytes": 962455026
                                }, 
                                {
                                    "names": [
                                        "rhel7.1:latest"
                                    ], 
                                    "sizeBytes": 765301508
                                }, 
                                {
                                    "names": [
                                        "openshift/dind-master:latest"
                                    ], 
                                    "sizeBytes": 731456758
                                }, 
                                {
                                    "names": [
                                        "openshift/dind-node:latest"
                                    ], 
                                    "sizeBytes": 731453034
                                }, 
                                {
                                    "names": [
                                        "172.30.224.2:5000/logging/logging-auth-proxy@sha256:63567bf13e7d4ad50117140426e98a4dcf59048ec0bf0e28f4ed074f8cda8155", 
                                        "172.30.224.2:5000/logging/logging-auth-proxy:latest"
                                    ], 
                                    "sizeBytes": 715536092
                                }, 
                                {
                                    "names": [
                                        "docker.io/node@sha256:46db0dd19955beb87b841c30a6b9812ba626473283e84117d1c016deee5949a9", 
                                        "docker.io/node:0.10.36"
                                    ], 
                                    "sizeBytes": 697128386
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-kibana@sha256:950568237cc7d0ff14ea9fe22c3967d888996db70c66181421ad68caeb5ba75f", 
                                        "docker.io/openshift/origin-logging-kibana:latest"
                                    ], 
                                    "sizeBytes": 682851513
                                }, 
                                {
                                    "names": [
                                        "172.30.224.2:5000/logging/logging-kibana@sha256:bcb762f029371abc58677ae894e2e1bd96d2509b05c9c863d3ecd17b05d07272", 
                                        "172.30.224.2:5000/logging/logging-kibana:latest"
                                    ], 
                                    "sizeBytes": 682851459
                                }, 
                                {
                                    "names": [
                                        "openshift/dind:latest"
                                    ], 
                                    "sizeBytes": 640650210
                                }, 
                                {
                                    "names": [
                                        "172.30.224.2:5000/logging/logging-elasticsearch@sha256:16dcdc717d95c0fabe6ec7713bfe6c9261d4de9a56962894965ae45c63a347ec", 
                                        "172.30.224.2:5000/logging/logging-elasticsearch:latest"
                                    ], 
                                    "sizeBytes": 623513030
                                }, 
                                {
                                    "names": [
                                        "172.30.224.2:5000/logging/logging-fluentd@sha256:130794deff858df95acbfe5e44921ee244118b1973ebce37aa6587765155b940", 
                                        "172.30.224.2:5000/logging/logging-fluentd:latest"
                                    ], 
                                    "sizeBytes": 472184910
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-elasticsearch@sha256:6296f1719676e970438cac4d912542b35ac786c14a15df892507007c4ecbe490", 
                                        "docker.io/openshift/origin-logging-elasticsearch:latest"
                                    ], 
                                    "sizeBytes": 425567196
                                }, 
                                {
                                    "names": [
                                        "172.30.224.2:5000/logging/logging-curator@sha256:c31caeff56c054df608838150e51377d6a79e2e8a33f48750d025d7690812a65", 
                                        "172.30.224.2:5000/logging/logging-curator:latest"
                                    ], 
                                    "sizeBytes": 418288265
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/base-centos7@sha256:aea292a3bddba020cde0ee83e6a45807931eb607c164ec6a3674f67039d8cd7c", 
                                        "docker.io/openshift/base-centos7:latest"
                                    ], 
                                    "sizeBytes": 383049978
                                }, 
                                {
                                    "names": [
                                        "rhel7.2:latest"
                                    ], 
                                    "sizeBytes": 377493597
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-egress-router:6acabdc", 
                                        "openshift/origin-egress-router:latest"
                                    ], 
                                    "sizeBytes": 364745713
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-base:latest"
                                    ], 
                                    "sizeBytes": 363070172
                                }, 
                                {
                                    "names": [
                                        "<none>@<none>", 
                                        "<none>:<none>"
                                    ], 
                                    "sizeBytes": 363024702
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-fluentd@sha256:cae7c21c9f111d4f5b481c14a65c597c67e715a8ffe3aee4c483100ee77296d7", 
                                        "docker.io/openshift/origin-logging-fluentd:latest"
                                    ], 
                                    "sizeBytes": 359223728
                                }, 
                                {
                                    "names": [
                                        "docker.io/fedora@sha256:69281ddd7b2600e5f2b17f1e12d7fba25207f459204fb2d15884f8432c479136", 
                                        "docker.io/fedora:25"
                                    ], 
                                    "sizeBytes": 230864375
                                }, 
                                {
                                    "names": [
                                        "docker.io/openshift/origin-logging-curator@sha256:daded10ff4e08dfb6659c964e305f16679596312da558af095835202cf66f703", 
                                        "docker.io/openshift/origin-logging-curator:latest"
                                    ], 
                                    "sizeBytes": 224977669
                                }, 
                                {
                                    "names": [
                                        "rhel7.3:latest", 
                                        "rhel7:latest"
                                    ], 
                                    "sizeBytes": 219121266
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-pod:6acabdc", 
                                        "openshift/origin-pod:latest"
                                    ], 
                                    "sizeBytes": 213199843
                                }, 
                                {
                                    "names": [
                                        "registry.access.redhat.com/rhel7.2@sha256:98e6ca5d226c26e31a95cd67716afe22833c943e1926a21daf1a030906a02249", 
                                        "registry.access.redhat.com/rhel7.2:latest"
                                    ], 
                                    "sizeBytes": 201376319
                                }, 
                                {
                                    "names": [
                                        "registry.access.redhat.com/rhel7.3@sha256:1e232401d8e0ba53b36b757b4712fbcbd1dab9c21db039c45a84871a74e89e68", 
                                        "registry.access.redhat.com/rhel7.3:latest"
                                    ], 
                                    "sizeBytes": 192693772
                                }, 
                                {
                                    "names": [
                                        "docker.io/centos@sha256:bba1de7c9d900a898e3cadbae040dfe8a633c06bc104a0df76ae24483e03c077"
                                    ], 
                                    "sizeBytes": 192548999
                                }, 
                                {
                                    "names": [
                                        "openshift/origin-source:latest"
                                    ], 
                                    "sizeBytes": 192548894
                                }, 
                                {
                                    "names": [
                                        "docker.io/centos@sha256:aebf12af704307dfa0079b3babdca8d7e8ff6564696882bcb5d11f1d461f9ee9", 
                                        "docker.io/centos:7", 
                                        "docker.io/centos:centos7"
                                    ], 
                                    "sizeBytes": 192548537
                                }, 
                                {
                                    "names": [
                                        "registry.access.redhat.com/rhel7.1@sha256:1bc5a4c43bbb29a5a96a61896ff696933be3502e2f5fdc4cde02d9e101731fdd", 
                                        "registry.access.redhat.com/rhel7.1:latest"
                                    ], 
                                    "sizeBytes": 158229901
                                }, 
                                {
                                    "names": [
                                        "openshift/hello-openshift:6acabdc", 
                                        "openshift/hello-openshift:latest"
                                    ], 
                                    "sizeBytes": 5643318
                                }
                            ], 
                            "nodeInfo": {
                                "architecture": "amd64", 
                                "bootID": "9b91d16b-8962-41b1-a934-bcdeca0205d2", 
                                "containerRuntimeVersion": "docker://1.12.6", 
                                "kernelVersion": "3.10.0-327.22.2.el7.x86_64", 
                                "kubeProxyVersion": "v1.6.1+5115d708d7", 
                                "kubeletVersion": "v1.6.1+5115d708d7", 
                                "machineID": "f9370ed252a14f73b014c1301a9b6d1b", 
                                "operatingSystem": "linux", 
                                "osImage": "Red Hat Enterprise Linux Server 7.3 (Maipo)", 
                                "systemUUID": "EC20179D-CEE7-8FA3-53A5-5B49D0B44786"
                            }
                        }
                    }
                ], 
                "kind": "List", 
                "metadata": {}, 
                "resourceVersion": "", 
                "selfLink": ""
            }
        ], 
        "returncode": 0
    }, 
    "state": "list"
}

TASK [openshift_logging_fluentd : Set openshift_logging_fluentd_hosts] *********
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:190
ok: [openshift] => {
    "ansible_facts": {
        "openshift_logging_fluentd_hosts": [
            "172.18.4.93"
        ]
    }, 
    "changed": false
}

TASK [openshift_logging_fluentd : include] *************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:195
included: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml for openshift

TASK [openshift_logging_fluentd : Label 172.18.4.93 for Fluentd deployment] ****
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:2
changed: [openshift] => {
    "changed": true, 
    "results": {
        "cmd": "/bin/oc label node 172.18.4.93 logging-infra-fluentd=true --overwrite", 
        "results": "", 
        "returncode": 0
    }, 
    "state": "add"
}

TASK [openshift_logging_fluentd : command] *************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:10
changed: [openshift -> 127.0.0.1] => {
    "changed": true, 
    "cmd": [
        "sleep", 
        "0.5"
    ], 
    "delta": "0:00:00.502438", 
    "end": "2017-06-09 10:39:28.473332", 
    "rc": 0, 
    "start": "2017-06-09 10:39:27.970894"
}

TASK [openshift_logging_fluentd : Delete temp directory] ***********************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:202
ok: [openshift] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-hFvrLb", 
    "state": "absent"
}

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:253
included: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/update_master_config.yaml for openshift

TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/main.yaml:36
skipping: [openshift] => {
    "changed": false, 
    "skip_reason": "Conditional result was False", 
    "skipped": true
}

TASK [openshift_logging : Cleaning up local temp dir] **************************
task path: /tmp/tmp.7yWYbvLCJ1/openhift-ansible/roles/openshift_logging/tasks/main.yaml:40
ok: [openshift -> 127.0.0.1] => {
    "changed": false, 
    "path": "/tmp/openshift-logging-ansible-XvDfd8", 
    "state": "absent"
}
META: ran handlers
META: ran handlers

PLAY [Update Master configs] ***************************************************
skipping: no hosts matched

PLAY RECAP *********************************************************************
localhost                  : ok=2    changed=0    unreachable=0    failed=0   
openshift                  : ok=213  changed=71   unreachable=0    failed=0   

/data/src/github.com/openshift/origin-aggregated-logging
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.282s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                                      READY     STATUS    RESTARTS   AGE
logging-es-data-master-nij68urm-1-7f3ck   1/1       Running   0          1m

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.280s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                     READY     STATUS    RESTARTS   AGE
logging-kibana-1-q2g1v   2/2       Running   0          39s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.526s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                      READY     STATUS    RESTARTS   AGE
logging-curator-1-819xv   1/1       Running   0          18s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.239s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                                          READY     STATUS    RESTARTS   AGE
logging-es-ops-data-master-1lbwcltv-1-w0wm7   1/1       Running   0          53s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.238s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                         READY     STATUS    RESTARTS   AGE
logging-kibana-ops-1-mq6b8   2/2       Running   0          26s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.213s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME                          READY     STATUS    RESTARTS   AGE
logging-curator-ops-1-qxmbj   1/1       Running   0          14s

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.224s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing /data/src/github.com/openshift/origin-aggregated-logging
--> Deploying template "logging/logging-fluentd-template-maker" for "-" to project logging

     logging-fluentd-template-maker
     ---------
     Template to create template for fluentd

     * With parameters:
        * MASTER_URL=https://kubernetes.default.svc.cluster.local
        * ES_HOST=logging-es
        * ES_PORT=9200
        * ES_CLIENT_CERT=/etc/fluent/keys/cert
        * ES_CLIENT_KEY=/etc/fluent/keys/key
        * ES_CA=/etc/fluent/keys/ca
        * OPS_HOST=logging-es-ops
        * OPS_PORT=9200
        * OPS_CLIENT_CERT=/etc/fluent/keys/cert
        * OPS_CLIENT_KEY=/etc/fluent/keys/key
        * OPS_CA=/etc/fluent/keys/ca
        * ES_COPY=false
        * ES_COPY_HOST=
        * ES_COPY_PORT=
        * ES_COPY_SCHEME=https
        * ES_COPY_CLIENT_CERT=
        * ES_COPY_CLIENT_KEY=
        * ES_COPY_CA=
        * ES_COPY_USERNAME=
        * ES_COPY_PASSWORD=
        * OPS_COPY_HOST=
        * OPS_COPY_PORT=
        * OPS_COPY_SCHEME=https
        * OPS_COPY_CLIENT_CERT=
        * OPS_COPY_CLIENT_KEY=
        * OPS_COPY_CA=
        * OPS_COPY_USERNAME=
        * OPS_COPY_PASSWORD=
        * IMAGE_PREFIX_DEFAULT=172.30.224.2:5000/logging/
        * IMAGE_VERSION_DEFAULT=latest
        * USE_JOURNAL=
        * JOURNAL_SOURCE=
        * JOURNAL_READ_FROM_HEAD=false
        * USE_MUX=false
        * USE_MUX_CLIENT=false
        * MUX_ALLOW_EXTERNAL=false
        * BUFFER_QUEUE_LIMIT=1024
        * BUFFER_SIZE_LIMIT=16777216

--> Creating resources ...
    template "logging-fluentd-template" created
--> Success
    Run 'oc status' to view your app.
WARNING: bridge-nf-call-ip6tables is disabled
START wait_for_fluentd_to_catch_up at 2017-06-09 14:39:43.181917679+00:00
added es message 8a328d17-4cf5-4e14-bf64-53ef5ff1d7c7
added es-ops message eeb4b9b4-0450-4a55-90ed-ceae8170de69
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 8a328d17-4cf5-4e14-bf64-53ef5ff1d7c7
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for eeb4b9b4-0450-4a55-90ed-ceae8170de69
END wait_for_fluentd_to_catch_up took 11 seconds at 2017-06-09 14:39:54.843647473+00:00
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.227s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You don't have any projects. You can try to create a new project, by running

    oc new-project <projectname>


There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.245s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.4.93:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    logging
    openshift
    openshift-infra

Using project "default".

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success...
SUCCESS after 0.292s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success
Standard output from the command:
cluster role "cluster-admin" added: "admin"

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success...
SUCCESS after 0.231s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success
Standard output from the command:
Login successful.

You don't have any projects. You can try to create a new project, by running

    oc new-project <projectname>


There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.667s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.4.93:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

  * default
    kube-public
    kube-system
    logging
    openshift
    openshift-infra

Using project "default".

There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.336s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success...
SUCCESS after 0.244s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success
Standard output from the command:
role "view" added: "loguser"

There was no error output from the command.
Checking if Elasticsearch logging-es-data-master-nij68urm-1-7f3ck is ready
{
    "_id": "0",
    "_index": ".searchguard.logging-es-data-master-nij68urm-1-7f3ck",
    "_shards": {
        "failed": 0,
        "successful": 1,
        "total": 1
    },
    "_type": "rolesmapping",
    "_version": 2,
    "created": false
}
Checking if Elasticsearch logging-es-ops-data-master-1lbwcltv-1-w0wm7 is ready
{
    "_id": "0",
    "_index": ".searchguard.logging-es-ops-data-master-1lbwcltv-1-w0wm7",
    "_shards": {
        "failed": 0,
        "successful": 1,
        "total": 1
    },
    "_type": "rolesmapping",
    "_version": 2,
    "created": false
}
------------------------------------------
     Test 'admin' user can access cluster stats
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success...
SUCCESS after 0.009s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
------------------------------------------
     Test 'admin' user can access cluster stats for OPS cluster
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success...
SUCCESS after 0.010s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
Running e2e tests
Checking installation of the EFK stack...
Running test/cluster/rollout.sh:20: executing 'oc project logging' expecting success...
SUCCESS after 0.236s: test/cluster/rollout.sh:20: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.4.93:8443".

There was no error output from the command.
[INFO] Checking for DeploymentConfigurations...
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success...
SUCCESS after 0.238s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success
Standard output from the command:
NAME             REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-kibana   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success...
SUCCESS after 0.225s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success
Standard output from the command:
replication controller "logging-kibana-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success...
SUCCESS after 0.224s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success
Standard output from the command:
NAME              REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-curator   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success...
SUCCESS after 0.215s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success
Standard output from the command:
replication controller "logging-curator-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success...
SUCCESS after 0.242s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success
Standard output from the command:
NAME                 REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-kibana-ops   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success...
SUCCESS after 0.236s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success
Standard output from the command:
replication controller "logging-kibana-ops-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success...
SUCCESS after 0.212s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success
Standard output from the command:
NAME                  REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-curator-ops   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success...
SUCCESS after 0.215s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success
Standard output from the command:
replication controller "logging-curator-ops-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-nij68urm' expecting success...
SUCCESS after 0.260s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-nij68urm' expecting success
Standard output from the command:
NAME                              REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-es-data-master-nij68urm   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-nij68urm' expecting success...
SUCCESS after 0.224s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-nij68urm' expecting success
Standard output from the command:
replication controller "logging-es-data-master-nij68urm-1" successfully rolled out

There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-1lbwcltv' expecting success...
SUCCESS after 0.218s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-1lbwcltv' expecting success
Standard output from the command:
NAME                                  REVISION   DESIRED   CURRENT   TRIGGERED BY
logging-es-ops-data-master-1lbwcltv   1          1         1         config

There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-1lbwcltv' expecting success...
SUCCESS after 0.207s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-1lbwcltv' expecting success
Standard output from the command:
replication controller "logging-es-ops-data-master-1lbwcltv-1" successfully rolled out

There was no error output from the command.
[INFO] Checking for Routes...
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success...
SUCCESS after 0.223s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success
Standard output from the command:
NAME             HOST/PORT                                 PATH      SERVICES         PORT      TERMINATION          WILDCARD
logging-kibana   kibana.router.default.svc.cluster.local             logging-kibana   <all>     reencrypt/Redirect   None

There was no error output from the command.
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success...
SUCCESS after 0.208s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success
Standard output from the command:
NAME                 HOST/PORT                                     PATH      SERVICES             PORT      TERMINATION          WILDCARD
logging-kibana-ops   kibana-ops.router.default.svc.cluster.local             logging-kibana-ops   <all>     reencrypt/Redirect   None

There was no error output from the command.
[INFO] Checking for Services...
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success...
SUCCESS after 0.206s: test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success
Standard output from the command:
NAME         CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
logging-es   172.30.217.175   <none>        9200/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success...
SUCCESS after 0.233s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success
Standard output from the command:
NAME                 CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
logging-es-cluster   172.30.159.212   <none>        9300/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success...
SUCCESS after 0.213s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success
Standard output from the command:
NAME             CLUSTER-IP      EXTERNAL-IP   PORT(S)   AGE
logging-kibana   172.30.30.149   <none>        443/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success...
SUCCESS after 0.222s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success
Standard output from the command:
NAME             CLUSTER-IP       EXTERNAL-IP   PORT(S)    AGE
logging-es-ops   172.30.110.130   <none>        9200/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success...
SUCCESS after 0.238s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success
Standard output from the command:
NAME                     CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
logging-es-ops-cluster   172.30.18.33   <none>        9300/TCP   1m

There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success...
SUCCESS after 0.246s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success
Standard output from the command:
NAME                 CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
logging-kibana-ops   172.30.164.117   <none>        443/TCP   1m

There was no error output from the command.
[INFO] Checking for OAuthClients...
Running test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success...
SUCCESS after 0.208s: test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success
Standard output from the command:
NAME           SECRET                                                             WWW-CHALLENGE   REDIRECT URIS
kibana-proxy   hq6HeKWxwDxGEk870qHq9HqfkisXwGEz4HZgSD2i3ekR5I4zojxRSrNEz5qzVPiZ   FALSE           https://kibana.router.default.svc.cluster.local,https://kibana-ops.router.default.svc.cluster.local

There was no error output from the command.
[INFO] Checking for DaemonSets...
Running test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success...
SUCCESS after 0.216s: test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success
Standard output from the command:
NAME              DESIRED   CURRENT   READY     UP-TO-DATE   AVAILABLE   NODE-SELECTOR                AGE
logging-fluentd   1         1         1         1            1           logging-infra-fluentd=true   53s

There was no error output from the command.
Running test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.214s: test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
1
There was no error output from the command.
Checking for log entry matches between ES and their sources...
WARNING: bridge-nf-call-ip6tables is disabled
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.356s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.248s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.4.93:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.234s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.4.93:8443".

There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-1-q2g1v for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-q2g1v -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.304s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-q2g1v -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-q2g1v -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.238s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-q2g1v -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-q2g1v -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.208s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-q2g1v -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Testing Elasticsearch pod logging-es-data-master-nij68urm-1-7f3ck for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.361s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-nij68urm-1-7f3ck -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.247s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-nij68urm-1-7f3ck -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-data-master-nij68urm-1-7f3ck recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.435s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es","master_node":"m8K0X_TASYaiQ9JFtFjmOA"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-data-master-nij68urm-1-7f3ck is the master
[INFO] Checking that Elasticsearch pod logging-es-data-master-nij68urm-1-7f3ck has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.360s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997                
.kibana                                                         
project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82.2017.06.09 
.searchguard.logging-es-data-master-nij68urm-1-7f3ck            
project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82.2017.06.09 

There was no error output from the command.
[INFO] Checking for index project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82 with Kibana pod logging-kibana-1-q2g1v...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-q2g1v' 'logging-es:9200' 'project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82' '/var/log/containers/*_2d141b54-4d1f-11e7-83b0-0e6fb895db82_*.log' '500' 'admin' '7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' '127.0.0.1'' expecting success...
SUCCESS after 11.901s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-q2g1v' 'logging-es:9200' 'project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82' '/var/log/containers/*_2d141b54-4d1f-11e7-83b0-0e6fb895db82_*.log' '500' 'admin' '7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-q2g1v -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82.*/_search?q=hostname:ip-172-18-4-93&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.default.2d141b54-4d1f-11e7-83b0-0e6fb895db82

There was no error output from the command.
[INFO] Checking for index project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82 with Kibana pod logging-kibana-1-q2g1v...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-q2g1v' 'logging-es:9200' 'project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82' '/var/log/containers/*_319a45c2-4d1f-11e7-83b0-0e6fb895db82_*.log' '500' 'admin' '7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' '127.0.0.1'' expecting success...
SUCCESS after 0.616s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-q2g1v' 'logging-es:9200' 'project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82' '/var/log/containers/*_319a45c2-4d1f-11e7-83b0-0e6fb895db82_*.log' '500' 'admin' '7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-q2g1v -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer 7NE-Tuce7Q8xnKgAeYP8mX4EvOadsadfvOZSCmilRuw' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82.*/_search?q=hostname:ip-172-18-4-93&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.logging.319a45c2-4d1f-11e7-83b0-0e6fb895db82

There was no error output from the command.
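Both project-index checks above print "Failure - no log entries found" from check-logs.go, even though the harness records each step as SUCCESS. When investigating this by hand, a quick way to tell an empty index apart from a query that simply matched nothing is to count documents directly. A minimal sketch, reusing the curl_es helper and the Elasticsearch pod name from the steps above (index patterns copied from the earlier _cat/indices output; the Fluentd pod name is a placeholder):

    # Count documents in the project indices directly
    curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/project.default.*/_count'
    curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/project.logging.*/_count'

    # Check whether Fluentd is running and shipping anything at all
    oc get pods -n logging | grep fluentd
    oc logs <fluentd-pod-name> -n logging | tail -20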
[INFO] Checking that Elasticsearch pod logging-es-data-master-nij68urm-1-7f3ck contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-nij68urm-1-7f3ck -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.312s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-nij68urm-1-7f3ck -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
org.ovirt.viaq-collectd.template.json

There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.365s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.584s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.402s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-nij68urm-1-7f3ck' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.243s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.245s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.4.93:8443" as "system:admin" using existing credentials.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    default
    kube-public
    kube-system
  * logging
    openshift
    openshift-infra

Using project "logging".

There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.211s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.4.93:8443".

There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-ops-1-mq6b8 for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-mq6b8 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.281s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-mq6b8 -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-mq6b8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.237s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-mq6b8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-mq6b8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.222s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-mq6b8 -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Testing Elasticsearch pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.366s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.209s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.383s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es-ops","master_node":"gCpihUBoSUG8P_xjUHU0TQ"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 is the master
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.403s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997         
.operations.2017.06.09                                   
.kibana                                                  
.searchguard.logging-es-ops-data-master-1lbwcltv-1-w0wm7 

There was no error output from the command.
[INFO] Checking for index .operations with Kibana pod logging-kibana-ops-1-mq6b8...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-mq6b8' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'ZX199OEG8d5EnevyRVFTh2sYKPi-ixKNMSKL_HUxpQk' '127.0.0.1'' expecting success...
SUCCESS after 0.726s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-mq6b8' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'ZX199OEG8d5EnevyRVFTh2sYKPi-ixKNMSKL_HUxpQk' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-ops-1-mq6b8 -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer ZX199OEG8d5EnevyRVFTh2sYKPi-ixKNMSKL_HUxpQk' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es-ops:9200/.operations.*/_search?q=hostname:ip-172-18-4-93&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es-ops:9200 for index .operations

There was no error output from the command.
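The same "no log entries found" message recurs here for the .operations index on the ops cluster; the document-count spot check sketched earlier applies unchanged, just pointed at the ops Elasticsearch pod (pod name taken from the steps above):

    curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/.operations.*/_count'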
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-1lbwcltv-1-w0wm7 contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-1lbwcltv-1-w0wm7 -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.282s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-1lbwcltv-1-w0wm7 -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
org.ovirt.viaq-collectd.template.json

There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.468s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.383s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.380s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-1lbwcltv-1-w0wm7' '/_template/org.ovirt.viaq-collectd.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
running test test-curator.sh
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
Error: the curator pod should be in the error state
logging-curator-1-84xsd
Error: did not find the correct error message
error: expected 'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.
POD or TYPE/NAME is a required argument for the logs command
See 'oc logs -h' for help and examples.
The project name must match this regex: [^[a-z0-9]([-a-z0-9]*[a-z0-9])?$] This does not match: [-BOGUS^PROJECT^NAME]
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
[ERROR] PID 4245: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:303: `echo running test $test` exited with status 1.
[INFO] 		Stack Trace: 
[INFO] 		  1: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:303: `echo running test $test`
[INFO]   Exiting with code 1.
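The test-curator.sh failure above ("the curator pod should be in the error state", followed by oc logs being invoked without a pod argument) indicates the script did not end up with a curator pod name to inspect after the configmap was recreated and the deploymentconfig rescaled. A minimal sketch of the manual checks one might run at this point; the label selector is an assumption based on the usual aggregated-logging labels, not something taken from this log:

    # Which curator pods exist, and in what state? (component=curator is assumed)
    oc get pods -n logging -l component=curator -o wide

    # Inspect the pod named earlier in the test output
    oc logs logging-curator-1-84xsd -n logging
    oc describe pod logging-curator-1-84xsd -n logging

    # Confirm the configmap that test-curator.sh just recreated
    oc get configmap logging-curator -n logging -o yaml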
/data/src/github.com/openshift/origin-aggregated-logging/hack/lib/log/system.sh: line 31:  4608 Terminated              sar -A -o "${binary_logfile}" 1 86400 > /dev/null 2> "${stderr_logfile}"  (wd: /data/src/github.com/openshift/origin-aggregated-logging)
[INFO] [CLEANUP] Beginning cleanup routines...
[INFO] [CLEANUP] Dumping cluster events to /tmp/origin-aggregated-logging/artifacts/events.txt
[INFO] [CLEANUP] Dumping etcd contents to /tmp/origin-aggregated-logging/artifacts/etcd
[WARNING] No compiled `etcdhelper` binary was found. Attempting to build one using:
[WARNING]   $ hack/build-go.sh tools/etcdhelper
++ Building go targets for linux/amd64: tools/etcdhelper
/data/src/github.com/openshift/origin-aggregated-logging/../origin/hack/build-go.sh took 272 seconds
2017-06-09 10:55:30.747418 I | warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated
[INFO] [CLEANUP] Dumping container logs to /tmp/origin-aggregated-logging/logs/containers
[INFO] [CLEANUP] Truncating log files over 200M
[INFO] [CLEANUP] Stopping docker containers
[INFO] [CLEANUP] Removing docker containers
Error: No such image, container or task: 70e18d140838
json: cannot unmarshal array into Go value of type types.ContainerJSON
Error response from daemon: You cannot remove a running container cb23e4d7da528e5cf68ff814d2e4183303d3113c11367c222598cb715a4ab359. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container c43b6c60fd85117dd2b8134d90585574f6eaceba6bbebe285524ae104ecf3044. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 26b0530d2b5bd52b9ce9e36e5fdcac5f2441c53ab702b66207d7d9e3927a7953. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 11bf46766abe0360eead62066ebcbf395ed83a2947c3927a40733d48ee025228. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 243466c6a7c3b21ff40e5c23b7f1fd7fddc58fbd01f9304be359c28680909264. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 82064bd28a9e0f7040d099fd2986e70bf3337c3ad2b8e4169b088c0caad56d5a. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 9f208267cad464755d3efc850440e93d55b3c03f3f7d877d3a052ec85dc48c2f. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 3b7f71aceeda370a4b66ad2283c9bb6ea23fddcc2b3532408a75006d8ce2f54c. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 7170de21d8a6bd7e0a498643a737c7738e18ea5bb701ab72689e598f6d8f5531. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 3476ee807d33ea52dc866fcf0231937571610637e74954198169e9ea3d4c8e3f. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 141a2a42a867e5365b6f1b5649e78710b8179e17d1c3085910dbf0ee648bba1c. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 21302b939b383159026445e72111e466da8c0f1c2bde167435ca98dfefc21b0b. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container b5f52f338703881d353f23e4bf4d5f0e6e9d6b5f6627337fbafbb643da605674. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 034d3576118f082d409da66bb1c8e2472b4c3d89f4864302de7dc45f9af61179. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container e8b7088f975fed198dd6b18103c19d5eef328c6592420c304babf78e77866942. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 6949884721e8542d86bd8cde2ba34840fb80cb9b11546512ec4fc35aa36c262c. Stop the container before attempting removal or use -f
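The container-removal errors above are the daemon refusing to delete running containers without -f, as its own message suggests. If this cleanup ever needs to be forced by hand, a sketch (not part of the harness) would be:

    # Stop everything still running, then remove all containers
    docker ps -q | xargs -r docker stop
    docker ps -aq | xargs -r docker rm -f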
[INFO] [CLEANUP] Killing child processes
[INFO] [CLEANUP] Pruning etcd data directory
[ERROR] /data/src/github.com/openshift/origin-aggregated-logging/logging.sh exited with code 1 after 00h 40m 59s
Error while running ssh/sudo command: 
set -e
pushd /data/src/github.com/openshift//origin-aggregated-logging/hack/testing >/dev/null
export PATH=$GOPATH/bin:$PATH

echo '***************************************************'
echo 'Running GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh...'
time GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh
echo 'Finished GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh'
echo '***************************************************'

popd >/dev/null
        
The SSH command responded with a non-zero exit status. Vagrant
assumes that this means the command failed. The output for this command
should be in the log above. Please read the output to determine what
went wrong.
==> openshiftdev: Downloading logs
==> openshiftdev: Downloading artifacts from '/var/log/yum.log' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace/origin/artifacts/yum.log'
==> openshiftdev: Downloading artifacts from '/var/log/secure' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace/origin/artifacts/secure'
==> openshiftdev: Downloading artifacts from '/var/log/audit/audit.log' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace/origin/artifacts/audit.log'
==> openshiftdev: Downloading artifacts from '/tmp/origin-aggregated-logging/' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace/origin/artifacts'
Build step 'Execute shell' marked build as failure
[description-setter] Could not determine description.
[PostBuildScript] - Execution post build scripts.
[workspace] $ /bin/sh -xe /tmp/hudson3978827358491594136.sh
+ INSTANCE_NAME=origin_logging-rhel7-1652
+ pushd origin
~/jobs/test-origin-aggregated-logging/workspace/origin ~/jobs/test-origin-aggregated-logging/workspace
+ rc=0
+ '[' -f .vagrant-openshift.json ']'
++ /usr/bin/vagrant ssh -c 'sudo ausearch -m avc'
+ ausearchresult='<no matches>'
+ rc=1
+ '[' '<no matches>' = '<no matches>' ']'
+ rc=0
+ /usr/bin/vagrant destroy -f
==> openshiftdev: Terminating the instance...
==> openshiftdev: Running cleanup tasks for 'shell' provisioner...
+ popd
~/jobs/test-origin-aggregated-logging/workspace
+ exit 0
[BFA] Scanning build for known causes...
[BFA] Found failure cause(s):
[BFA] Command Failure from category failure
[BFA] Done. 0s
Finished: FAILURE