"component": "curator-ops",
"logging-infra": "curator",
"provider": "openshift"
},
"name": "logging-curator-ops"
},
"spec": {
"containers": [
{
"env": [
{
"name": "K8S_HOST_URL",
"value": "https://kubernetes.default.svc.cluster.local"
},
{
"name": "ES_HOST",
"value": "logging-es-ops"
},
{
"name": "ES_PORT",
"value": "9200"
},
{
"name": "ES_CLIENT_CERT",
"value": "/etc/curator/keys/cert"
},
{
"name": "ES_CLIENT_KEY",
"value": "/etc/curator/keys/key"
},
{
"name": "ES_CA",
"value": "/etc/curator/keys/ca"
},
{
"name": "CURATOR_DEFAULT_DAYS",
"value": "30"
},
{
"name": "CURATOR_RUN_HOUR",
"value": "0"
},
{
"name": "CURATOR_RUN_MINUTE",
"value": "0"
},
{
"name": "CURATOR_RUN_TIMEZONE",
"value": "UTC"
},
{
"name": "CURATOR_SCRIPT_LOG_LEVEL",
"value": "INFO"
},
{
"name": "CURATOR_LOG_LEVEL",
"value": "ERROR"
}
],
"image": "172.30.97.34:5000/logging/logging-curator:latest",
"imagePullPolicy": "Always",
"name": "curator",
"resources": {
"limits": {
"cpu": "100m"
}
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/etc/curator/keys",
"name": "certs",
"readOnly": true
},
{
"mountPath": "/etc/curator/settings",
"name": "config",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "aggregated-logging-curator",
"serviceAccountName": "aggregated-logging-curator",
"terminationGracePeriodSeconds": 30,
"volumes": [
{
"name": "certs",
"secret": {
"defaultMode": 420,
"secretName": "logging-curator"
}
},
{
"configMap": {
"defaultMode": 420,
"name": "logging-curator"
},
"name": "config"
}
]
}
},
"test": false,
"triggers": [
{
"type": "ConfigChange"
}
]
},
"status": {
"availableReplicas": 0,
"conditions": [
{
"lastTransitionTime": "2017-06-08T19:59:32Z",
"lastUpdateTime": "2017-06-08T19:59:32Z",
"message": "Deployment config does not have minimum availability.",
"status": "False",
"type": "Available"
},
{
"lastTransitionTime": "2017-06-08T19:59:32Z",
"lastUpdateTime": "2017-06-08T19:59:32Z",
"message": "replication controller \"logging-curator-ops-1\" is waiting for pod \"logging-curator-ops-1-deploy\" to run",
"status": "Unknown",
"type": "Progressing"
}
],
"details": {
"causes": [
{
"type": "ConfigChange"
}
],
"message": "config change"
},
"latestVersion": 1,
"observedGeneration": 2,
"replicas": 0,
"unavailableReplicas": 0,
"updatedReplicas": 0
}
}
],
"returncode": 0
},
"state": "present"
}
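The curator-ops deployment above is driven entirely by environment variables (ES_HOST, CURATOR_DEFAULT_DAYS, CURATOR_RUN_HOUR, and so on) plus the logging-curator ConfigMap mounted at /etc/curator/settings. As a rough sketch of how retention would usually be tuned after an install like this (the config.yaml key and the ".defaults"/per-project schema are the conventional curator settings format for this era and are assumed here, with illustrative values):

    # Hypothetical settings file; the day counts are examples, not values from this run.
    cat > config.yaml <<'EOF'
    .defaults:
      delete:
        days: 30
    .operations:
      delete:
        days: 7
    EOF
    # Replace the mounted ConfigMap and roll out a new curator-ops deployment.
    oc create configmap logging-curator --from-file=config.yaml=config.yaml -n logging \
      --dry-run -o yaml | oc replace -n logging -f -
    oc rollout latest dc/logging-curator-ops -n logging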
TASK [openshift_logging_curator : Delete temp directory] ***********************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_curator/tasks/main.yaml:109
ok: [openshift] => {
"changed": false,
"path": "/tmp/openshift-logging-ansible-t1TNFr",
"state": "absent"
}
TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:226
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging : include_role] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:241
statically included: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:2
[WARNING]: when statements should not include jinja2 templating delimiters
such as {{ }} or {% %}. Found: {{ openshift_logging_fluentd_nodeselector.keys()
| count }} > 1
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:6
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:10
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:14
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:3
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:7
ok: [openshift] => {
"ansible_facts": {
"fluentd_version": "3_5"
},
"changed": false
}
TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:12
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : fail] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/determine_version.yaml:15
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:20
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:26
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : Create temp directory for doing work in] *****
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:33
ok: [openshift] => {
"changed": false,
"cmd": [
"mktemp",
"-d",
"/tmp/openshift-logging-ansible-XXXXXX"
],
"delta": "0:00:01.004043",
"end": "2017-06-08 15:59:37.179272",
"rc": 0,
"start": "2017-06-08 15:59:36.175229"
}
STDOUT:
/tmp/openshift-logging-ansible-f1GfFF
TASK [openshift_logging_fluentd : set_fact] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:38
ok: [openshift] => {
"ansible_facts": {
"tempdir": "/tmp/openshift-logging-ansible-f1GfFF"
},
"changed": false
}
TASK [openshift_logging_fluentd : Create templates subdirectory] ***************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:41
ok: [openshift] => {
"changed": false,
"gid": 0,
"group": "root",
"mode": "0755",
"owner": "root",
"path": "/tmp/openshift-logging-ansible-f1GfFF/templates",
"secontext": "unconfined_u:object_r:user_tmp_t:s0",
"size": 6,
"state": "directory",
"uid": 0
}
TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:51
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : Create Fluentd service account] **************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:59
changed: [openshift] => {
"changed": true,
"results": {
"cmd": "/bin/oc get sa aggregated-logging-fluentd -o json -n logging",
"results": [
{
"apiVersion": "v1",
"imagePullSecrets": [
{
"name": "aggregated-logging-fluentd-dockercfg-p8tr6"
}
],
"kind": "ServiceAccount",
"metadata": {
"creationTimestamp": "2017-06-08T19:59:37Z",
"name": "aggregated-logging-fluentd",
"namespace": "logging",
"resourceVersion": "1554",
"selfLink": "/api/v1/namespaces/logging/serviceaccounts/aggregated-logging-fluentd",
"uid": "0069d2fc-4c85-11e7-a35b-0e88fb395880"
},
"secrets": [
{
"name": "aggregated-logging-fluentd-token-0zjhk"
},
{
"name": "aggregated-logging-fluentd-dockercfg-p8tr6"
}
]
}
],
"returncode": 0
},
"state": "present"
}
TASK [openshift_logging_fluentd : Set privileged permissions for Fluentd] ******
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:68
changed: [openshift] => {
"changed": true,
"present": "present",
"results": {
"cmd": "/bin/oc adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd -n logging",
"results": "",
"returncode": 0
}
}
TASK [openshift_logging_fluentd : Set cluster-reader permissions for Fluentd] ***
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:77
changed: [openshift] => {
"changed": true,
"present": "present",
"results": {
"cmd": "/bin/oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd -n logging",
"results": "",
"returncode": 0
}
}
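The two oc adm policy calls above add the aggregated-logging-fluentd service account to the privileged SCC and to the cluster-reader role, which the role grants so fluentd can read host logs and cluster metadata. A couple of read-only checks to confirm both grants landed (binding names vary slightly by release, hence the grep rather than a fixed name):

    # The service account should show up in the SCC's user list...
    oc get scc privileged -o yaml | grep aggregated-logging-fluentd
    # ...and in whichever cluster role binding carries cluster-reader.
    oc get clusterrolebindings -o yaml | grep -n aggregated-logging-fluentd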
TASK [openshift_logging_fluentd : template] ************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:86
ok: [openshift] => {
"changed": false,
"checksum": "a8c8596f5fc2c5dd7c8d33d244af17a2555be086",
"dest": "/tmp/openshift-logging-ansible-f1GfFF/fluent.conf",
"gid": 0,
"group": "root",
"md5sum": "579698b48ffce6276ee0e8d5ac71a338",
"mode": "0644",
"owner": "root",
"secontext": "unconfined_u:object_r:admin_home_t:s0",
"size": 1301,
"src": "/root/.ansible/tmp/ansible-tmp-1496951979.6-200870978165975/source",
"state": "file",
"uid": 0
}
TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:94
ok: [openshift] => {
"changed": false,
"checksum": "b3e75eddc4a0765edc77da092384c0c6f95440e1",
"dest": "/tmp/openshift-logging-ansible-f1GfFF/fluentd-throttle-config.yaml",
"gid": 0,
"group": "root",
"md5sum": "25871b8e0a9bedc166a6029872a6c336",
"mode": "0644",
"owner": "root",
"secontext": "unconfined_u:object_r:admin_home_t:s0",
"size": 133,
"src": "/root/.ansible/tmp/ansible-tmp-1496951979.98-24536926897790/source",
"state": "file",
"uid": 0
}
TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:100
ok: [openshift] => {
"changed": false,
"checksum": "a3aa36da13f3108aa4ad5b98d4866007b44e9798",
"dest": "/tmp/openshift-logging-ansible-f1GfFF/secure-forward.conf",
"gid": 0,
"group": "root",
"md5sum": "1084b00c427f4fa48dfc66d6ad6555d4",
"mode": "0644",
"owner": "root",
"secontext": "unconfined_u:object_r:admin_home_t:s0",
"size": 563,
"src": "/root/.ansible/tmp/ansible-tmp-1496951980.27-85839234566081/source",
"state": "file",
"uid": 0
}
TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:107
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:113
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : copy] ****************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:119
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging_fluentd : Set Fluentd configmap] ***********************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:125
changed: [openshift] => {
"changed": true,
"results": {
"cmd": "/bin/oc get configmap logging-fluentd -o json -n logging",
"results": [
{
"apiVersion": "v1",
"data": {
"fluent.conf": "# This file is the fluentd configuration entrypoint. Edit with care.\n\n@include configs.d/openshift/system.conf\n\n# In each section below, pre- and post- includes don't include anything initially;\n# they exist to enable future additions to openshift conf as needed.\n\n## sources\n## ordered so that syslog always runs last...\n@include configs.d/openshift/input-pre-*.conf\n@include configs.d/dynamic/input-docker-*.conf\n@include configs.d/dynamic/input-syslog-*.conf\n@include configs.d/openshift/input-post-*.conf\n##\n\n<label @INGRESS>\n## filters\n @include configs.d/openshift/filter-pre-*.conf\n @include configs.d/openshift/filter-retag-journal.conf\n @include configs.d/openshift/filter-k8s-meta.conf\n @include configs.d/openshift/filter-kibana-transform.conf\n @include configs.d/openshift/filter-k8s-flatten-hash.conf\n @include configs.d/openshift/filter-k8s-record-transform.conf\n @include configs.d/openshift/filter-syslog-record-transform.conf\n @include configs.d/openshift/filter-viaq-data-model.conf\n @include configs.d/openshift/filter-post-*.conf\n##\n\n## matches\n @include configs.d/openshift/output-pre-*.conf\n @include configs.d/openshift/output-operations.conf\n @include configs.d/openshift/output-applications.conf\n # no post - applications.conf matches everything left\n##\n</label>\n",
"secure-forward.conf": "# @type secure_forward\n\n# self_hostname ${HOSTNAME}\n# shared_key <SECRET_STRING>\n\n# secure yes\n# enable_strict_verification yes\n\n# ca_cert_path /etc/fluent/keys/your_ca_cert\n# ca_private_key_path /etc/fluent/keys/your_private_key\n # for private CA secret key\n# ca_private_key_passphrase passphrase\n\n# <server>\n # or IP\n# host server.fqdn.example.com\n# port 24284\n# </server>\n# <server>\n # ip address to connect\n# host 203.0.113.8\n # specify hostlabel for FQDN verification if ipaddress is used for host\n# hostlabel server.fqdn.example.com\n# </server>\n",
"throttle-config.yaml": "# Logging example fluentd throttling config file\n\n#example-project:\n# read_lines_limit: 10\n#\n#.operations:\n# read_lines_limit: 100\n"
},
"kind": "ConfigMap",
"metadata": {
"creationTimestamp": "2017-06-08T19:59:41Z",
"name": "logging-fluentd",
"namespace": "logging",
"resourceVersion": "1567",
"selfLink": "/api/v1/namespaces/logging/configmaps/logging-fluentd",
"uid": "024f3edf-4c85-11e7-a35b-0e88fb395880"
}
}
],
"returncode": 0
},
"state": "present"
}
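The logging-fluentd ConfigMap above carries fluent.conf plus a fully commented-out secure-forward.conf and throttle-config.yaml. Forwarding a copy of the logs to an external fluentd receiver is normally done by uncommenting that secure_forward stanza and restarting the collectors; a hedged sketch follows (host, port, and shared key are placeholders, not values from this run):

    oc edit configmap logging-fluentd -n logging
    # In the secure-forward.conf key, uncomment and fill in roughly:
    #   @type secure_forward
    #   self_hostname ${HOSTNAME}
    #   shared_key <some shared secret>
    #   secure yes
    #   <server>
    #     host receiver.example.com
    #     port 24284
    #   </server>
    # Then restart the collectors so they reread the mounted config:
    oc delete pods -l component=fluentd -n logging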
TASK [openshift_logging_fluentd : Set logging-fluentd secret] ******************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:137
changed: [openshift] => {
"changed": true,
"results": {
"cmd": "/bin/oc secrets new logging-fluentd ca=/etc/origin/logging/ca.crt key=/etc/origin/logging/system.logging.fluentd.key cert=/etc/origin/logging/system.logging.fluentd.crt -n logging",
"results": "",
"returncode": 0
},
"state": "present"
}
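The secret task above uses the legacy 'oc secrets new name key=path ...' form. On clients where that subcommand has been removed, the same secret (same key names, same files) can be created with the generic form:

    oc create secret generic logging-fluentd -n logging \
      --from-file=ca=/etc/origin/logging/ca.crt \
      --from-file=key=/etc/origin/logging/system.logging.fluentd.key \
      --from-file=cert=/etc/origin/logging/system.logging.fluentd.crt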
TASK [openshift_logging_fluentd : Generate logging-fluentd daemonset definition] ***
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:154
ok: [openshift] => {
"changed": false,
"checksum": "7c780486c861d90bb77a4a4930d6e0a8b71516e5",
"dest": "/tmp/openshift-logging-ansible-f1GfFF/templates/logging-fluentd.yaml",
"gid": 0,
"group": "root",
"md5sum": "9817062927b49ed5ac1595c4a51481ac",
"mode": "0644",
"owner": "root",
"secontext": "unconfined_u:object_r:admin_home_t:s0",
"size": 3413,
"src": "/root/.ansible/tmp/ansible-tmp-1496951981.99-137611802794465/source",
"state": "file",
"uid": 0
}
TASK [openshift_logging_fluentd : Set logging-fluentd daemonset] ***************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:172
changed: [openshift] => {
"changed": true,
"results": {
"cmd": "/bin/oc get daemonset logging-fluentd -o json -n logging",
"results": [
{
"apiVersion": "extensions/v1beta1",
"kind": "DaemonSet",
"metadata": {
"creationTimestamp": "2017-06-08T19:59:42Z",
"generation": 1,
"labels": {
"component": "fluentd",
"logging-infra": "fluentd",
"provider": "openshift"
},
"name": "logging-fluentd",
"namespace": "logging",
"resourceVersion": "1575",
"selfLink": "/apis/extensions/v1beta1/namespaces/logging/daemonsets/logging-fluentd",
"uid": "034a1f9f-4c85-11e7-a35b-0e88fb395880"
},
"spec": {
"selector": {
"matchLabels": {
"component": "fluentd",
"provider": "openshift"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"component": "fluentd",
"logging-infra": "fluentd",
"provider": "openshift"
},
"name": "fluentd-elasticsearch"
},
"spec": {
"containers": [
{
"env": [
{
"name": "K8S_HOST_URL",
"value": "https://kubernetes.default.svc.cluster.local"
},
{
"name": "ES_HOST",
"value": "logging-es"
},
{
"name": "ES_PORT",
"value": "9200"
},
{
"name": "ES_CLIENT_CERT",
"value": "/etc/fluent/keys/cert"
},
{
"name": "ES_CLIENT_KEY",
"value": "/etc/fluent/keys/key"
},
{
"name": "ES_CA",
"value": "/etc/fluent/keys/ca"
},
{
"name": "OPS_HOST",
"value": "logging-es-ops"
},
{
"name": "OPS_PORT",
"value": "9200"
},
{
"name": "OPS_CLIENT_CERT",
"value": "/etc/fluent/keys/cert"
},
{
"name": "OPS_CLIENT_KEY",
"value": "/etc/fluent/keys/key"
},
{
"name": "OPS_CA",
"value": "/etc/fluent/keys/ca"
},
{
"name": "ES_COPY",
"value": "false"
},
{
"name": "USE_JOURNAL",
"value": "true"
},
{
"name": "JOURNAL_SOURCE"
},
{
"name": "JOURNAL_READ_FROM_HEAD",
"value": "false"
}
],
"image": "172.30.97.34:5000/logging/logging-fluentd:latest",
"imagePullPolicy": "Always",
"name": "fluentd-elasticsearch",
"resources": {
"limits": {
"cpu": "100m",
"memory": "512Mi"
}
},
"securityContext": {
"privileged": true
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/run/log/journal",
"name": "runlogjournal"
},
{
"mountPath": "/var/log",
"name": "varlog"
},
{
"mountPath": "/var/lib/docker/containers",
"name": "varlibdockercontainers",
"readOnly": true
},
{
"mountPath": "/etc/fluent/configs.d/user",
"name": "config",
"readOnly": true
},
{
"mountPath": "/etc/fluent/keys",
"name": "certs",
"readOnly": true
},
{
"mountPath": "/etc/docker-hostname",
"name": "dockerhostname",
"readOnly": true
},
{
"mountPath": "/etc/localtime",
"name": "localtime",
"readOnly": true
},
{
"mountPath": "/etc/sysconfig/docker",
"name": "dockercfg",
"readOnly": true
},
{
"mountPath": "/etc/docker",
"name": "dockerdaemoncfg",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"logging-infra-fluentd": "true"
},
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "aggregated-logging-fluentd",
"serviceAccountName": "aggregated-logging-fluentd",
"terminationGracePeriodSeconds": 30,
"volumes": [
{
"hostPath": {
"path": "/run/log/journal"
},
"name": "runlogjournal"
},
{
"hostPath": {
"path": "/var/log"
},
"name": "varlog"
},
{
"hostPath": {
"path": "/var/lib/docker/containers"
},
"name": "varlibdockercontainers"
},
{
"configMap": {
"defaultMode": 420,
"name": "logging-fluentd"
},
"name": "config"
},
{
"name": "certs",
"secret": {
"defaultMode": 420,
"secretName": "logging-fluentd"
}
},
{
"hostPath": {
"path": "/etc/hostname"
},
"name": "dockerhostname"
},
{
"hostPath": {
"path": "/etc/localtime"
},
"name": "localtime"
},
{
"hostPath": {
"path": "/etc/sysconfig/docker"
},
"name": "dockercfg"
},
{
"hostPath": {
"path": "/etc/docker"
},
"name": "dockerdaemoncfg"
}
]
}
},
"templateGeneration": 1,
"updateStrategy": {
"rollingUpdate": {
"maxUnavailable": 1
},
"type": "RollingUpdate"
}
},
"status": {
"currentNumberScheduled": 0,
"desiredNumberScheduled": 0,
"numberMisscheduled": 0,
"numberReady": 0,
"observedGeneration": 1
}
}
],
"returncode": 0
},
"state": "present"
}
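Note that the daemonset's pod template carries the nodeSelector logging-infra-fluentd=true, so desiredNumberScheduled stays at 0 and no fluentd pods start until nodes are labeled (which the role does a few tasks further down). Some read-only checks that make that gating visible:

    # Desired/current counts stay 0 until at least one node carries the label.
    oc get daemonset logging-fluentd -n logging
    oc get nodes -l logging-infra-fluentd=true
    oc get pods -l component=fluentd -n logging -o wide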
TASK [openshift_logging_fluentd : Retrieve list of Fluentd hosts] **************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:183
ok: [openshift] => {
"changed": false,
"results": {
"cmd": "/bin/oc get node -o json -n default",
"results": [
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "v1",
"kind": "Node",
"metadata": {
"annotations": {
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
},
"creationTimestamp": "2017-06-08T19:46:09Z",
"labels": {
"beta.kubernetes.io/arch": "amd64",
"beta.kubernetes.io/os": "linux",
"kubernetes.io/hostname": "172.18.6.73"
},
"name": "172.18.6.73",
"namespace": "",
"resourceVersion": "1573",
"selfLink": "/api/v1/nodes/172.18.6.73",
"uid": "1e9742a3-4c83-11e7-a35b-0e88fb395880"
},
"spec": {
"externalID": "172.18.6.73",
"providerID": "aws:////i-09eaf9eba636279a8"
},
"status": {
"addresses": [
{
"address": "172.18.6.73",
"type": "LegacyHostIP"
},
{
"address": "172.18.6.73",
"type": "InternalIP"
},
{
"address": "172.18.6.73",
"type": "Hostname"
}
],
"allocatable": {
"cpu": "4",
"memory": "7129288Ki",
"pods": "40"
},
"capacity": {
"cpu": "4",
"memory": "7231688Ki",
"pods": "40"
},
"conditions": [
{
"lastHeartbeatTime": "2017-06-08T19:59:42Z",
"lastTransitionTime": "2017-06-08T19:46:09Z",
"message": "kubelet has sufficient disk space available",
"reason": "KubeletHasSufficientDisk",
"status": "False",
"type": "OutOfDisk"
},
{
"lastHeartbeatTime": "2017-06-08T19:59:42Z",
"lastTransitionTime": "2017-06-08T19:46:09Z",
"message": "kubelet has sufficient memory available",
"reason": "KubeletHasSufficientMemory",
"status": "False",
"type": "MemoryPressure"
},
{
"lastHeartbeatTime": "2017-06-08T19:59:42Z",
"lastTransitionTime": "2017-06-08T19:46:09Z",
"message": "kubelet has no disk pressure",
"reason": "KubeletHasNoDiskPressure",
"status": "False",
"type": "DiskPressure"
},
{
"lastHeartbeatTime": "2017-06-08T19:59:42Z",
"lastTransitionTime": "2017-06-08T19:46:09Z",
"message": "kubelet is posting ready status",
"reason": "KubeletReady",
"status": "True",
"type": "Ready"
}
],
"daemonEndpoints": {
"kubeletEndpoint": {
"Port": 10250
}
},
"images": [
{
"names": [
"openshift/origin-federation:6acabdc",
"openshift/origin-federation:latest"
],
"sizeBytes": 1205885664
},
{
"names": [
"docker.io/openshift/origin-docker-registry@sha256:ae8bef1a8222265fb34dd33dc434c766e63d492522798038f4c15bcf67e0fde2",
"docker.io/openshift/origin-docker-registry:latest"
],
"sizeBytes": 1100553430
},
{
"names": [
"openshift/origin-docker-registry:6acabdc",
"openshift/origin-docker-registry:latest"
],
"sizeBytes": 1100164272
},
{
"names": [
"openshift/origin-gitserver:6acabdc",
"openshift/origin-gitserver:latest"
],
"sizeBytes": 1086520226
},
{
"names": [
"openshift/openvswitch:6acabdc",
"openshift/openvswitch:latest"
],
"sizeBytes": 1053403667
},
{
"names": [
"openshift/node:6acabdc",
"openshift/node:latest"
],
"sizeBytes": 1051721928
},
{
"names": [
"openshift/origin-keepalived-ipfailover:6acabdc",
"openshift/origin-keepalived-ipfailover:latest"
],
"sizeBytes": 1028529711
},
{
"names": [
"openshift/origin-haproxy-router:6acabdc",
"openshift/origin-haproxy-router:latest"
],
"sizeBytes": 1022758742
},
{
"names": [
"openshift/origin-deployer:6acabdc",
"openshift/origin-deployer:latest"
],
"sizeBytes": 1001728427
},
{
"names": [
"openshift/origin:6acabdc",
"openshift/origin:latest"
],
"sizeBytes": 1001728427
},
{
"names": [
"openshift/origin-recycler:6acabdc",
"openshift/origin-recycler:latest"
],
"sizeBytes": 1001728427
},
{
"names": [
"openshift/origin-docker-builder:6acabdc",
"openshift/origin-docker-builder:latest"
],
"sizeBytes": 1001728427
},
{
"names": [
"openshift/origin-cluster-capacity:6acabdc",
"openshift/origin-cluster-capacity:latest"
],
"sizeBytes": 962455026
},
{
"names": [
"rhel7.1:latest"
],
"sizeBytes": 765301508
},
{
"names": [
"openshift/dind-master:latest"
],
"sizeBytes": 731456758
},
{
"names": [
"openshift/dind-node:latest"
],
"sizeBytes": 731453034
},
{
"names": [
"172.30.97.34:5000/logging/logging-auth-proxy@sha256:3feebf5acbdedc02eb0ed46a4971878e81104ba1bcb460e225a8273af42bc575",
"172.30.97.34:5000/logging/logging-auth-proxy:latest"
],
"sizeBytes": 715536037
},
{
"names": [
"<none>@<none>",
"<none>:<none>"
],
"sizeBytes": 709532011
},
{
"names": [
"docker.io/node@sha256:46db0dd19955beb87b841c30a6b9812ba626473283e84117d1c016deee5949a9",
"docker.io/node:0.10.36"
],
"sizeBytes": 697128386
},
{
"names": [
"docker.io/openshift/origin-logging-kibana@sha256:3b95068a8514b13780c047f955e99d481af5597bf2b214501030e7be2f4d6dc1",
"docker.io/openshift/origin-logging-kibana:latest"
],
"sizeBytes": 682851503
},
{
"names": [
"172.30.97.34:5000/logging/logging-kibana@sha256:3b05b97547a6798a8b55be392b0c45c9c69e8cc396e58e8e0954ff06e8c0dda8",
"172.30.97.34:5000/logging/logging-kibana:latest"
],
"sizeBytes": 682851502
},
{
"names": [
"openshift/dind:latest"
],
"sizeBytes": 640650210
},
{
"names": [
"172.30.97.34:5000/logging/logging-elasticsearch@sha256:56506d01f40445eb2310c01da95c24b218c130c0b9d6df8b74bf7f1021de3cd5",
"172.30.97.34:5000/logging/logging-elasticsearch:latest"
],
"sizeBytes": 623379790
},
{
"names": [
"172.30.97.34:5000/logging/logging-fluentd@sha256:1445780eae442f3b8c24a76b81d980d5fd72b8f7f90388de09ff38f3f5074014",
"172.30.97.34:5000/logging/logging-fluentd:latest"
],
"sizeBytes": 472182631
},
{
"names": [
"172.30.97.34:5000/logging/logging-curator@sha256:f56f0c2da71f8640eced9daa4f8a5a25e7b1dfd85a0826c963e83e045e13703d",
"172.30.97.34:5000/logging/logging-curator:latest"
],
"sizeBytes": 418287807
},
{
"names": [
"docker.io/openshift/base-centos7@sha256:aea292a3bddba020cde0ee83e6a45807931eb607c164ec6a3674f67039d8cd7c",
"docker.io/openshift/base-centos7:latest"
],
"sizeBytes": 383049978
},
{
"names": [
"rhel7.2:latest"
],
"sizeBytes": 377493597
},
{
"names": [
"openshift/origin-base:latest"
],
"sizeBytes": 363070172
},
{
"names": [
"docker.io/openshift/origin-logging-fluentd@sha256:b55877bf5f5624c0111688db16bac54ed7b64291114323fb010d9e8e630b1c89",
"docker.io/openshift/origin-logging-fluentd:latest"
],
"sizeBytes": 359219273
},
{
"names": [
"docker.io/fedora@sha256:69281ddd7b2600e5f2b17f1e12d7fba25207f459204fb2d15884f8432c479136",
"docker.io/fedora:25"
],
"sizeBytes": 230864375
},
{
"names": [
"docker.io/openshift/origin-logging-curator@sha256:c09a1f9ef6f2ca23b4d3dca5f4a2c0f5bb76ed65351d9295d7d3344e5bce3f89",
"docker.io/openshift/origin-logging-curator:latest"
],
"sizeBytes": 224977447
},
{
"names": [
"rhel7.3:latest",
"rhel7:latest"
],
"sizeBytes": 219121266
},
{
"names": [
"openshift/origin-pod:6acabdc",
"openshift/origin-pod:latest"
],
"sizeBytes": 213199843
},
{
"names": [
"registry.access.redhat.com/rhel7.2@sha256:98e6ca5d226c26e31a95cd67716afe22833c943e1926a21daf1a030906a02249",
"registry.access.redhat.com/rhel7.2:latest"
],
"sizeBytes": 201376319
},
{
"names": [
"registry.access.redhat.com/rhel7.3@sha256:1e232401d8e0ba53b36b757b4712fbcbd1dab9c21db039c45a84871a74e89e68",
"registry.access.redhat.com/rhel7.3:latest"
],
"sizeBytes": 192693772
},
{
"names": [
"docker.io/centos@sha256:bba1de7c9d900a898e3cadbae040dfe8a633c06bc104a0df76ae24483e03c077"
],
"sizeBytes": 192548999
},
{
"names": [
"openshift/origin-source:latest"
],
"sizeBytes": 192548894
},
{
"names": [
"docker.io/centos@sha256:aebf12af704307dfa0079b3babdca8d7e8ff6564696882bcb5d11f1d461f9ee9",
"docker.io/centos:7",
"docker.io/centos:centos7"
],
"sizeBytes": 192548537
},
{
"names": [
"registry.access.redhat.com/rhel7.1@sha256:1bc5a4c43bbb29a5a96a61896ff696933be3502e2f5fdc4cde02d9e101731fdd",
"registry.access.redhat.com/rhel7.1:latest"
],
"sizeBytes": 158229901
}
],
"nodeInfo": {
"architecture": "amd64",
"bootID": "054a1172-a815-43da-868f-312a99aa6ddf",
"containerRuntimeVersion": "docker://1.12.6",
"kernelVersion": "3.10.0-327.22.2.el7.x86_64",
"kubeProxyVersion": "v1.6.1+5115d708d7",
"kubeletVersion": "v1.6.1+5115d708d7",
"machineID": "f9370ed252a14f73b014c1301a9b6d1b",
"operatingSystem": "linux",
"osImage": "Red Hat Enterprise Linux Server 7.3 (Maipo)",
"systemUUID": "EC2EA42C-7DD4-071C-8268-E9D07D47666E"
}
}
}
],
"kind": "List",
"metadata": {},
"resourceVersion": "",
"selfLink": ""
}
],
"returncode": 0
},
"state": "list"
}
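The task above fetches the full node objects only to collect their names. The same list can be pulled directly with a jsonpath query, which is effectively what gets distilled into openshift_logging_fluentd_hosts in the next task (on this cluster it would print just 172.18.6.73):

    oc get nodes -o jsonpath='{.items[*].metadata.name}'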
TASK [openshift_logging_fluentd : Set openshift_logging_fluentd_hosts] *********
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:190
ok: [openshift] => {
"ansible_facts": {
"openshift_logging_fluentd_hosts": [
"172.18.6.73"
]
},
"changed": false
}
TASK [openshift_logging_fluentd : include] *************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:195
included: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml for openshift
TASK [openshift_logging_fluentd : Label 172.18.6.73 for Fluentd deployment] ****
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:2
changed: [openshift] => {
"changed": true,
"results": {
"cmd": "/bin/oc label node 172.18.6.73 logging-infra-fluentd=true --overwrite",
"results": "",
"returncode": 0
},
"state": "add"
}
TASK [openshift_logging_fluentd : command] *************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml:10
changed: [openshift -> 127.0.0.1] => {
"changed": true,
"cmd": [
"sleep",
"0.5"
],
"delta": "0:00:00.503099",
"end": "2017-06-08 15:59:45.630139",
"rc": 0,
"start": "2017-06-08 15:59:45.127040"
}
TASK [openshift_logging_fluentd : Delete temp directory] ***********************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging_fluentd/tasks/main.yaml:202
ok: [openshift] => {
"changed": false,
"path": "/tmp/openshift-logging-ansible-f1GfFF",
"state": "absent"
}
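Labeling the node is what actually schedules a fluentd pod here, since the daemonset selects nodes on logging-infra-fluentd=true; removing the label is the usual way to drain fluentd from a node again. Roughly:

    # Start collecting on a node:
    oc label node 172.18.6.73 logging-infra-fluentd=true --overwrite
    # Stop collecting on it (the trailing dash removes the label):
    oc label node 172.18.6.73 logging-infra-fluentd-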
TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/install_logging.yaml:253
included: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/update_master_config.yaml for openshift
TASK [openshift_logging : include] *********************************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/main.yaml:36
skipping: [openshift] => {
"changed": false,
"skip_reason": "Conditional result was False",
"skipped": true
}
TASK [openshift_logging : Cleaning up local temp dir] **************************
task path: /tmp/tmp.sFTNjMK016/openhift-ansible/roles/openshift_logging/tasks/main.yaml:40
ok: [openshift -> 127.0.0.1] => {
"changed": false,
"path": "/tmp/openshift-logging-ansible-9K3MWT",
"state": "absent"
}
META: ran handlers
META: ran handlers
PLAY [Update Master configs] ***************************************************
skipping: no hosts matched
PLAY RECAP *********************************************************************
localhost : ok=2 changed=0 unreachable=0 failed=0
openshift : ok=207 changed=70 unreachable=0 failed=0
/data/src/github.com/openshift/origin-aggregated-logging
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.284s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:170: executing 'oc get pods -l component=es' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-es-data-master-y3vgixad-1-qjxc8 1/1 Running 0 59s
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.240s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:171: executing 'oc get pods -l component=kibana' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-kibana-1-9w1ln 2/2 Running 0 35s
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 99.722s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:172: executing 'oc get pods -l component=curator' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 0/1 Pending 0 0s
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 0/1 ContainerCreating 0 0s
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 0/1 ContainerCreating 0 1s
... repeated 2 times
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 0/1 rpc error: code = 2 desc = Error response from daemon: {"message":"devmapper: Thin Pool has 3009 free data blocks which is less than minimum required 3035 free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior"} 0 1s
... same status repeated at every poll (2-3 times per second of age) through 30s
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 0/1 rpc error: code = 2 desc = Error response from daemon: {"message":"devmapper: Thin Pool has 2957 free data blocks which is less than minimum required 3035 free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior"} 0 30s
... same status repeated at every poll through 1m
NAME READY STATUS RESTARTS AGE
logging-curator-1-zdg1n 1/1 Running 0 1m
Standard error from the command:
No resources found.
... repeated 84 times
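The long ContainerCreating stall above is the node's Docker devicemapper thin pool running out of free data blocks, not a problem with the curator image; the pod starts as soon as enough space is reclaimed. When this shows up, the usual first steps on the affected node look something like this (pool names and thresholds vary per host, so treat this as a sketch):

    # Thin pool usage as Docker reports it:
    docker info | grep -iA3 'data space'
    # And as LVM reports it (the pool is often named docker-pool):
    lvs
    # Reclaiming space typically means pruning unused images on that node:
    docker rmi $(docker images -q -f dangling=true)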
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.235s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:175: executing 'oc get pods -l component=es-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-es-ops-data-master-ulq5p7o9-1-v4mfc 1/1 Running 0 2m
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.231s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:176: executing 'oc get pods -l component=kibana-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-kibana-ops-1-c11xv 2/2 Running 0 2m
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s...
SUCCESS after 0.263s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:177: executing 'oc get pods -l component=curator-ops' expecting any result and text 'Running'; re-trying every 0.2s until completion or 180.000s
Standard output from the command:
NAME READY STATUS RESTARTS AGE
logging-curator-ops-1-gmknp 1/1 Running 0 13s
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.252s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:185: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
/data/src/github.com/openshift/origin-aggregated-logging/hack/testing /data/src/github.com/openshift/origin-aggregated-logging
--> Deploying template "logging/logging-fluentd-template-maker" for "-" to project logging
logging-fluentd-template-maker
---------
Template to create template for fluentd
* With parameters:
* MASTER_URL=https://kubernetes.default.svc.cluster.local
* ES_HOST=logging-es
* ES_PORT=9200
* ES_CLIENT_CERT=/etc/fluent/keys/cert
* ES_CLIENT_KEY=/etc/fluent/keys/key
* ES_CA=/etc/fluent/keys/ca
* OPS_HOST=logging-es-ops
* OPS_PORT=9200
* OPS_CLIENT_CERT=/etc/fluent/keys/cert
* OPS_CLIENT_KEY=/etc/fluent/keys/key
* OPS_CA=/etc/fluent/keys/ca
* ES_COPY=false
* ES_COPY_HOST=
* ES_COPY_PORT=
* ES_COPY_SCHEME=https
* ES_COPY_CLIENT_CERT=
* ES_COPY_CLIENT_KEY=
* ES_COPY_CA=
* ES_COPY_USERNAME=
* ES_COPY_PASSWORD=
* OPS_COPY_HOST=
* OPS_COPY_PORT=
* OPS_COPY_SCHEME=https
* OPS_COPY_CLIENT_CERT=
* OPS_COPY_CLIENT_KEY=
* OPS_COPY_CA=
* OPS_COPY_USERNAME=
* OPS_COPY_PASSWORD=
* IMAGE_PREFIX_DEFAULT=172.30.97.34:5000/logging/
* IMAGE_VERSION_DEFAULT=latest
* USE_JOURNAL=
* JOURNAL_SOURCE=
* JOURNAL_READ_FROM_HEAD=false
* USE_MUX=false
* USE_MUX_CLIENT=false
* MUX_ALLOW_EXTERNAL=false
* BUFFER_QUEUE_LIMIT=1024
* BUFFER_SIZE_LIMIT=16777216
--> Creating resources ...
template "logging-fluentd-template" created
--> Success
Run 'oc status' to view your app.
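The "--> Deploying template" block above is ordinary oc new-app output for the template-maker; a hedged by-hand equivalent with oc process, using a few of the parameter defaults printed above, would be:
  oc process logging-fluentd-template-maker \
      -p ES_HOST=logging-es -p OPS_HOST=logging-es-ops \
      -p IMAGE_PREFIX_DEFAULT=172.30.97.34:5000/logging/ -p IMAGE_VERSION_DEFAULT=latest \
    | oc create -f -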
START wait_for_fluentd_to_catch_up at 2017-06-08 20:01:38.532186606+00:00
added es message 1e2190f8-ecff-40a2-8cd5-f31d02b8e77a
added es-ops message 6d70020d-5503-42a0-a656-38933f5e3c2a
good - wait_for_fluentd_to_catch_up: found 1 record project logging for 1e2190f8-ecff-40a2-8cd5-f31d02b8e77a
good - wait_for_fluentd_to_catch_up: found 1 record project .operations for 6d70020d-5503-42a0-a656-38933f5e3c2a
END wait_for_fluentd_to_catch_up took 10 seconds at 2017-06-08 20:01:48.963594486+00:00
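wait_for_fluentd_to_catch_up writes one marker message per cluster and waits for each to appear in Elasticsearch; in rough outline it amounts to something like the following sketch ($es_ops_pod and the key/cert/ca paths are placeholders, not the suite's actual variables):
  uuid=$(uuidgen)
  logger "$uuid"    # marker record that fluentd should forward to the .operations index
  # $es_ops_pod, $key, $cert and $ca are placeholders for the es-ops pod name and its client credentials
  oc exec $es_ops_pod -- curl -s --key $key --cert $cert --cacert $ca \
      "https://localhost:9200/.operations.*/_count?q=message:$uuid"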
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.238s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:223: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.
You don't have any projects. You can try to create a new project, by running
oc new-project <projectname>
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.361s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:224: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.6.73:8443" as "system:admin" using existing credentials.
You have access to the following projects and can switch between them with 'oc project <projectname>':
* default
kube-public
kube-system
logging
openshift
openshift-infra
Using project "default".
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success...
SUCCESS after 0.247s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:225: executing 'oadm policy add-cluster-role-to-user cluster-admin admin' expecting success
Standard output from the command:
cluster role "cluster-admin" added: "admin"
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success...
SUCCESS after 0.231s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:226: executing 'oc login --username=loguser --password=loguser' expecting success
Standard output from the command:
Login successful.
You don't have any projects. You can try to create a new project, by running
oc new-project <projectname>
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.238s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:227: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.6.73:8443" as "system:admin" using existing credentials.
You have access to the following projects and can switch between them with 'oc project <projectname>':
* default
kube-public
kube-system
logging
openshift
openshift-infra
Using project "default".
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success...
SUCCESS after 0.406s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:228: executing 'oc project logging > /dev/null' expecting success
There was no output from the command.
There was no error output from the command.
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success...
SUCCESS after 0.225s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:229: executing 'oadm policy add-role-to-user view loguser' expecting success
Standard output from the command:
role "view" added: "loguser"
There was no error output from the command.
Checking if Elasticsearch logging-es-data-master-y3vgixad-1-qjxc8 is ready
{
"_id": "0",
"_index": ".searchguard.logging-es-data-master-y3vgixad-1-qjxc8",
"_shards": {
"failed": 0,
"successful": 1,
"total": 1
},
"_type": "rolesmapping",
"_version": 2,
"created": false
}
Checking if Elasticsearch logging-es-ops-data-master-ulq5p7o9-1-v4mfc is ready
{
"_id": "0",
"_index": ".searchguard.logging-es-ops-data-master-ulq5p7o9-1-v4mfc",
"_shards": {
"failed": 0,
"successful": 1,
"total": 1
},
"_type": "rolesmapping",
"_version": 2,
"created": false
}
------------------------------------------
Test 'admin' user can access cluster stats
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success...
SUCCESS after 0.009s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:265: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
------------------------------------------
Test 'admin' user can access cluster stats for OPS cluster
------------------------------------------
Running /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success...
SUCCESS after 0.010s: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:274: executing 'test 200 = 200' expecting success
There was no output from the command.
There was no error output from the command.
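The two "test 200 = 200" successes mean the harness had already captured an HTTP 200 from Elasticsearch for the admin user; a hedged way to reproduce that sort of check by hand, reusing the proxied-access pattern that appears later in this log (the /_cluster/health endpoint and the token handling are assumptions):
  oc login --username=admin --password=admin
  token=$(oc whoami -t)     # token for the admin user; /_cluster/health below is an assumed stand-in for "cluster stats"
  oc login --username=system:admin
  oc exec logging-kibana-1-9w1ln -- curl -s -o /dev/null -w '%{response_code}' \
      --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca \
      -H "X-Proxy-Remote-User: admin" -H "Authorization: Bearer $token" -H "X-Forwarded-For: 127.0.0.1" \
      "https://logging-es:9200/_cluster/health"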
Running e2e tests
Checking installation of the EFK stack...
Running test/cluster/rollout.sh:20: executing 'oc project logging' expecting success...
SUCCESS after 0.213s: test/cluster/rollout.sh:20: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.6.73:8443".
There was no error output from the command.
[INFO] Checking for DeploymentConfigurations...
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success...
SUCCESS after 0.208s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-kibana 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success...
SUCCESS after 0.206s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana' expecting success
Standard output from the command:
replication controller "logging-kibana-1" successfully rolled out
There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success...
SUCCESS after 0.206s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-curator 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success...
SUCCESS after 0.235s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator' expecting success
Standard output from the command:
replication controller "logging-curator-1" successfully rolled out
There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success...
SUCCESS after 0.213s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-kibana-ops' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-kibana-ops 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success...
SUCCESS after 0.217s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-kibana-ops' expecting success
Standard output from the command:
replication controller "logging-kibana-ops-1" successfully rolled out
There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success...
SUCCESS after 0.208s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-curator-ops' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-curator-ops 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success...
SUCCESS after 0.226s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-curator-ops' expecting success
Standard output from the command:
replication controller "logging-curator-ops-1" successfully rolled out
There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-y3vgixad' expecting success...
SUCCESS after 0.233s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-data-master-y3vgixad' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-es-data-master-y3vgixad 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-y3vgixad' expecting success...
SUCCESS after 0.217s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-data-master-y3vgixad' expecting success
Standard output from the command:
replication controller "logging-es-data-master-y3vgixad-1" successfully rolled out
There was no error output from the command.
Running test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-ulq5p7o9' expecting success...
SUCCESS after 0.221s: test/cluster/rollout.sh:24: executing 'oc get deploymentconfig logging-es-ops-data-master-ulq5p7o9' expecting success
Standard output from the command:
NAME REVISION DESIRED CURRENT TRIGGERED BY
logging-es-ops-data-master-ulq5p7o9 1 1 1 config
There was no error output from the command.
Running test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-ulq5p7o9' expecting success...
SUCCESS after 0.280s: test/cluster/rollout.sh:25: executing 'oc rollout status deploymentconfig/logging-es-ops-data-master-ulq5p7o9' expecting success
Standard output from the command:
replication controller "logging-es-ops-data-master-ulq5p7o9-1" successfully rolled out
There was no error output from the command.
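The DeploymentConfig checks above run the same two commands against each config; condensed into a loop (names taken from the output above):
  for dc in logging-kibana logging-curator logging-kibana-ops logging-curator-ops \
            logging-es-data-master-y3vgixad logging-es-ops-data-master-ulq5p7o9; do
    oc get deploymentconfig "$dc"
    oc rollout status "deploymentconfig/$dc"
  done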
[INFO] Checking for Routes...
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success...
SUCCESS after 0.214s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana' expecting success
Standard output from the command:
NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
logging-kibana kibana.router.default.svc.cluster.local logging-kibana <all> reencrypt/Redirect None
There was no error output from the command.
Running test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success...
SUCCESS after 0.216s: test/cluster/rollout.sh:30: executing 'oc get route logging-kibana-ops' expecting success
Standard output from the command:
NAME HOST/PORT PATH SERVICES PORT TERMINATION WILDCARD
logging-kibana-ops kibana-ops.router.default.svc.cluster.local logging-kibana-ops <all> reencrypt/Redirect None
There was no error output from the command.
[INFO] Checking for Services...
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success...
SUCCESS after 0.227s: test/cluster/rollout.sh:35: executing 'oc get service logging-es' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-es 172.30.170.120 <none> 9200/TCP 3m
There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success...
SUCCESS after 0.209s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-cluster' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-es-cluster 172.30.49.222 <none> 9300/TCP 3m
There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success...
SUCCESS after 0.217s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-kibana 172.30.180.209 <none> 443/TCP 3m
There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success...
SUCCESS after 0.208s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-es-ops 172.30.19.214 <none> 9200/TCP 3m
There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success...
SUCCESS after 0.210s: test/cluster/rollout.sh:35: executing 'oc get service logging-es-ops-cluster' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-es-ops-cluster 172.30.13.94 <none> 9300/TCP 3m
There was no error output from the command.
Running test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success...
SUCCESS after 0.212s: test/cluster/rollout.sh:35: executing 'oc get service logging-kibana-ops' expecting success
Standard output from the command:
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
logging-kibana-ops 172.30.215.140 <none> 443/TCP 3m
There was no error output from the command.
[INFO] Checking for OAuthClients...
Running test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success...
SUCCESS after 0.204s: test/cluster/rollout.sh:40: executing 'oc get oauthclient kibana-proxy' expecting success
Standard output from the command:
NAME SECRET WWW-CHALLENGE REDIRECT URIS
kibana-proxy QX45AHGZj5CQvabZVUD7KHDabtstdJu7JdfxLZftwOrKeOT7bikSPLrI4l4ic0hZ FALSE https://kibana-ops.router.default.svc.cluster.local
There was no error output from the command.
[INFO] Checking for DaemonSets...
Running test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success...
SUCCESS after 0.206s: test/cluster/rollout.sh:45: executing 'oc get daemonset logging-fluentd' expecting success
Standard output from the command:
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE-SELECTOR AGE
logging-fluentd 1 1 1 1 1 logging-infra-fluentd=true 2m
There was no error output from the command.
Running test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.209s: test/cluster/rollout.sh:47: executing 'oc get daemonset logging-fluentd -o jsonpath='{ .status.numberReady }'' expecting any result and text '1'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
1
There was no error output from the command.
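The fluentd DaemonSet only schedules onto nodes carrying its node selector (logging-infra-fluentd=true, per the NODE-SELECTOR column above); on this single-node cluster DESIRED is already 1, but if it ever reads 0 the usual fix is to label a node, roughly ($node_name is a placeholder):
  oc get nodes --show-labels
  oc label node $node_name logging-infra-fluentd=true    # $node_name is a placeholder for the node to label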
Checking for log entry matches between ES and their sources...
WARNING: bridge-nf-call-ip6tables is disabled
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.242s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.
You have access to the following projects and can switch between them with 'oc project <projectname>':
default
kube-public
kube-system
* logging
openshift
openshift-infra
Using project "logging".
There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.250s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.6.73:8443" as "system:admin" using existing credentials.
You have access to the following projects and can switch between them with 'oc project <projectname>':
default
kube-public
kube-system
* logging
openshift
openshift-infra
Using project "logging".
There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.243s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.6.73:8443".
There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-1-9w1ln for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-9w1ln -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.280s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-1-9w1ln -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-9w1ln -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.229s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-1-9w1ln -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-9w1ln -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.210s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-1-9w1ln -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Testing Elasticsearch pod logging-es-data-master-y3vgixad-1-qjxc8 for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.384s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-y3vgixad-1-qjxc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.262s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-data-master-y3vgixad-1-qjxc8 -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-data-master-y3vgixad-1-qjxc8 recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.360s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es","master_node":"uwXOiWggQwOmF5a3uw1F4A"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-data-master-y3vgixad-1-qjxc8 is the master
[INFO] Checking that Elasticsearch pod logging-es-data-master-y3vgixad-1-qjxc8 has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.362s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880.2017.06.08
project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880.2017.06.08
.searchguard.logging-es-data-master-y3vgixad-1-qjxc8
.kibana
There was no error output from the command.
[INFO] Checking for index project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880 with Kibana pod logging-kibana-1-9w1ln...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-9w1ln' 'logging-es:9200' 'project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880' '/var/log/containers/*_1b8425b6-4c83-11e7-a35b-0e88fb395880_*.log' '500' 'admin' 'gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' '127.0.0.1'' expecting success...
SUCCESS after 9.590s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-9w1ln' 'logging-es:9200' 'project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880' '/var/log/containers/*_1b8425b6-4c83-11e7-a35b-0e88fb395880_*.log' '500' 'admin' 'gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-9w1ln -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880.*/_search?q=hostname:ip-172-18-6-73&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.default.1b8425b6-4c83-11e7-a35b-0e88fb395880
There was no error output from the command.
[INFO] Checking for index project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880 with Kibana pod logging-kibana-1-9w1ln...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-9w1ln' 'logging-es:9200' 'project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880' '/var/log/containers/*_202cf39d-4c83-11e7-a35b-0e88fb395880_*.log' '500' 'admin' 'gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' '127.0.0.1'' expecting success...
SUCCESS after 0.606s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-1-9w1ln' 'logging-es:9200' 'project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880' '/var/log/containers/*_202cf39d-4c83-11e7-a35b-0e88fb395880_*.log' '500' 'admin' 'gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-1-9w1ln -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es:9200/project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880.*/_search?q=hostname:ip-172-18-6-73&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es:9200 for index project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880
There was no error output from the command.
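Both project-index checks above report "no log entries found" even though the step is marked SUCCESS; a quick way to tell whether the index is simply empty or the hostname filter is too narrow is to re-run the echoed curl against _count without the query string (pod name, token and index are copied from the output above, so the token will be stale for anyone reading this later):
  oc exec logging-kibana-1-9w1ln -- curl -s \
      --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca \
      -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer gsLsr9liUXk8hDyjKee1GHn6j2e1nEI9Hex_qHFvUmk' \
      -H 'X-Forwarded-For: 127.0.0.1' \
      "https://logging-es:9200/project.logging.202cf39d-4c83-11e7-a35b-0e88fb395880.*/_count"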
[INFO] Checking that Elasticsearch pod logging-es-data-master-y3vgixad-1-qjxc8 contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-y3vgixad-1-qjxc8 -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.290s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-data-master-y3vgixad-1-qjxc8 -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.355s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.397s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
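curl_es appears to be the suite's wrapper that execs curl inside the named Elasticsearch pod with the admin certificates; swapping the HEAD request for a plain GET returns the template body itself (?pretty is standard Elasticsearch):
  curl_es 'logging-es-data-master-y3vgixad-1-qjxc8' '/_template/com.redhat.viaq-openshift-project.template.json?pretty'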
Running test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success...
SUCCESS after 0.227s: test/cluster/functionality.sh:40: executing 'oc login --username=admin --password=admin' expecting success
Standard output from the command:
Login successful.
You have access to the following projects and can switch between them with 'oc project <projectname>':
default
kube-public
kube-system
* logging
openshift
openshift-infra
Using project "logging".
There was no error output from the command.
Running test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success...
SUCCESS after 0.242s: test/cluster/functionality.sh:44: executing 'oc login --username=system:admin' expecting success
Standard output from the command:
Logged into "https://172.18.6.73:8443" as "system:admin" using existing credentials.
You have access to the following projects and can switch between them with 'oc project <projectname>':
default
kube-public
kube-system
* logging
openshift
openshift-infra
Using project "logging".
There was no error output from the command.
Running test/cluster/functionality.sh:45: executing 'oc project logging' expecting success...
SUCCESS after 0.214s: test/cluster/functionality.sh:45: executing 'oc project logging' expecting success
Standard output from the command:
Already on project "logging" on server "https://172.18.6.73:8443".
There was no error output from the command.
[INFO] Testing Kibana pod logging-kibana-ops-1-c11xv for a successful start...
Running test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-c11xv -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 120.281s: test/cluster/functionality.sh:52: executing 'oc exec logging-kibana-ops-1-c11xv -c kibana -- curl -s --request HEAD --write-out '%{response_code}' http://localhost:5601/' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-c11xv -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.212s: test/cluster/functionality.sh:53: executing 'oc get pod logging-kibana-ops-1-c11xv -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
Running test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-c11xv -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.216s: test/cluster/functionality.sh:54: executing 'oc get pod logging-kibana-ops-1-c11xv -o jsonpath='{ .status.containerStatuses[?(@.name=="kibana-proxy")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Testing Elasticsearch pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc for a successful start...
Running test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.384s: test/cluster/functionality.sh:59: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/' -X HEAD -w '%{response_code}'' expecting any result and text '200'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s...
SUCCESS after 0.221s: test/cluster/functionality.sh:60: executing 'oc get pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc -o jsonpath='{ .status.containerStatuses[?(@.name=="elasticsearch")].ready }'' expecting any result and text 'true'; re-trying every 0.2s until completion or 60.000s
Standard output from the command:
true
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc recovered its indices after starting...
Running test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.380s: test/cluster/functionality.sh:63: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_cluster/state/master_node' -w '%{response_code}'' expecting any result and text '}200$'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
{"cluster_name":"logging-es-ops","master_node":"suRIFh5HSeiU0l-O_l2TKw"}200
There was no error output from the command.
[INFO] Elasticsearch pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc is the master
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc has persisted indices created by Fluentd...
Running test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s...
SUCCESS after 0.408s: test/cluster/functionality.sh:76: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_cat/indices?h=index'' expecting any result and text '^(project|\.operations)\.'; re-trying every 0.2s until completion or 600.000s
Standard output from the command:
.kibana.d033e22ae348aeb5660fc2140aec35850c4da997
.operations.2017.06.08
.kibana
.searchguard.logging-es-ops-data-master-ulq5p7o9-1-v4mfc
There was no error output from the command.
[INFO] Checking for index .operations with Kibana pod logging-kibana-ops-1-c11xv...
Running test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-c11xv' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'NsaYDHpV4r-MDIL27zAFEIM8IauAiA4ej82p16aw74k' '127.0.0.1'' expecting success...
SUCCESS after 0.788s: test/cluster/functionality.sh:100: executing 'sudo -E VERBOSE=true go run '/data/src/github.com/openshift/origin-aggregated-logging/hack/testing/check-logs.go' 'logging-kibana-ops-1-c11xv' 'logging-es-ops:9200' '.operations' '/var/log/messages' '500' 'admin' 'NsaYDHpV4r-MDIL27zAFEIM8IauAiA4ej82p16aw74k' '127.0.0.1'' expecting success
Standard output from the command:
Executing command [oc exec logging-kibana-ops-1-c11xv -- curl -s --key /etc/kibana/keys/key --cert /etc/kibana/keys/cert --cacert /etc/kibana/keys/ca -H 'X-Proxy-Remote-User: admin' -H 'Authorization: Bearer NsaYDHpV4r-MDIL27zAFEIM8IauAiA4ej82p16aw74k' -H 'X-Forwarded-For: 127.0.0.1' -XGET "https://logging-es-ops:9200/.operations.*/_search?q=hostname:ip-172-18-6-73&fields=message&size=500"]
Failure - no log entries found in Elasticsearch logging-es-ops:9200 for index .operations
There was no error output from the command.
[INFO] Checking that Elasticsearch pod logging-es-ops-data-master-ulq5p7o9-1-v4mfc contains common data model index templates...
Running test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-ulq5p7o9-1-v4mfc -- ls -1 /usr/share/elasticsearch/index_templates' expecting success...
SUCCESS after 0.314s: test/cluster/functionality.sh:105: executing 'oc exec logging-es-ops-data-master-ulq5p7o9-1-v4mfc -- ls -1 /usr/share/elasticsearch/index_templates' expecting success
Standard output from the command:
com.redhat.viaq-openshift-operations.template.json
com.redhat.viaq-openshift-project.template.json
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.363s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_template/com.redhat.viaq-openshift-operations.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
Running test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'...
SUCCESS after 0.393s: test/cluster/functionality.sh:107: executing 'curl_es 'logging-es-ops-data-master-ulq5p7o9-1-v4mfc' '/_template/com.redhat.viaq-openshift-project.template.json' -X HEAD -w '%{response_code}'' expecting success and text '200'
Standard output from the command:
200
There was no error output from the command.
running test test-curator.sh
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
Error: the curator pod should be in the error state
Error: did not find the correct error message
error: expected 'logs (POD | TYPE/NAME) [CONTAINER_NAME]'.
POD or TYPE/NAME is a required argument for the logs command
See 'oc logs -h' for help and examples.
The project name length must be less than or equal to 63 characters. This is too long: [this-project-name-is-far-far-too-long-this-project-name-is-far-far-too-long-this-project-name-is-far-far-too-long-this-project-name-is-far-far-too-long]
configmap "logging-curator" deleted
configmap "logging-curator" created
deploymentconfig "logging-curator" scaled
deploymentconfig "logging-curator" scaled
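test-curator.sh feeds the curator an over-long project name and expects the pod to end up in an error state with a matching message; the "oc logs" usage error above indicates the test could not resolve a curator pod name at that moment. A hedged sequence for inspecting the same state by hand:
  oc get pods -l component=curator             # confirm whether a curator pod exists and what state it is in
  pod=$(oc get pods -l component=curator -o jsonpath='{.items[0].metadata.name}')
  oc logs "$pod"                               # the failing step above effectively ran this with an empty pod name
  oc get configmap logging-curator -o yaml     # the curator config that carries the over-long project name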
[ERROR] PID 4249: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:303: `echo running test $test` exited with status 1.
[INFO] Stack Trace:
[INFO] 1: /data/src/github.com/openshift/origin-aggregated-logging/logging.sh:303: `echo running test $test`
[INFO] Exiting with code 1.
/data/src/github.com/openshift/origin-aggregated-logging/hack/lib/log/system.sh: line 31: 4617 Terminated sar -A -o "${binary_logfile}" 1 86400 > /dev/null 2> "${stderr_logfile}" (wd: /data/src/github.com/openshift/origin-aggregated-logging)
[INFO] [CLEANUP] Beginning cleanup routines...
[INFO] [CLEANUP] Dumping cluster events to /tmp/origin-aggregated-logging/artifacts/events.txt
[INFO] [CLEANUP] Dumping etcd contents to /tmp/origin-aggregated-logging/artifacts/etcd
[WARNING] No compiled `etcdhelper` binary was found. Attempting to build one using:
[WARNING] $ hack/build-go.sh tools/etcdhelper
++ Building go targets for linux/amd64: tools/etcdhelper
/data/src/github.com/openshift/origin-aggregated-logging/../origin/hack/build-go.sh took 154 seconds
2017-06-08 16:14:58.906849 I | warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated
[INFO] [CLEANUP] Dumping container logs to /tmp/origin-aggregated-logging/logs/containers
[INFO] [CLEANUP] Truncating log files over 200M
[INFO] [CLEANUP] Stopping docker containers
[INFO] [CLEANUP] Removing docker containers
Error response from daemon: You cannot remove a running container 8b7bfe21b130fc4ec1a1713142a8c838c928faa50710206fda03eb5f2ba88558. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 7ec5b664d4f5d20dafd3caad82481633c81a33b20556de734e9152b23aeb5b05. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 1dd59beca3e6ba35ffd151cbe0a9d23603c811499632fd3dbe3c341e4ad5c512. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 99352b6fc1f15a02b7d653b0ed001940c5eba67a3f52966111b3844de1808d48. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container f00b52830dea067bdf8c332aface2f7ccca67ed4bde53d6c14b76f95788e6e30. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 9e20a9371cee6e0a49095866ec8b694518914b048a159b64c30377545cd50971. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 4f1e1f27cf38632448867cd0de254b23eaef528d30b4044e947e416261e2a405. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 19533e3caeeeca421bf5c5cc40a4130f956026b264c1297b5a3e3e1da25c4065. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 737dfe6f1e924b16588ec80e61ee60198fb1b9fbb43bd429485447e7baf92b52. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 6fef692c74095353b69250426c9b610e80e78cb40b6a693614ff781f6e90af70. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 57a5f832a5cec84fced9a1771711d83c41bd06490b35ca17f205e27c81344d5b. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 2d39523b5619113eee27b83bd3c9540eba1b1737f41c0c240956e85222725940. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 04454ada8d61650fcf62cb8a6b43fc7333e0df8459e0fb7d5eafdab75f692c09. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 4e964783d64fce4975417690121eb065fa2232d67ba2b953365722e848b5630f. Stop the container before attempting removal or use -f
Error response from daemon: You cannot remove a running container 252f64f5d3b8376c7dc9321d5a57de69c6176b4f9453e970e8b154866beece78. Stop the container before attempting removal or use -f
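The cleanup step issues a plain docker rm against containers that are still running, hence the errors above; as the daemon message itself suggests, stopping the container first or forcing removal clears them, for example:
  sudo docker rm -f 8b7bfe21b130fc4ec1a1713142a8c838c928faa50710206fda03eb5f2ba88558   # id taken from the first error above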
[INFO] [CLEANUP] Killing child processes
[INFO] [CLEANUP] Pruning etcd data directory
[ERROR] /data/src/github.com/openshift/origin-aggregated-logging/logging.sh exited with code 1 after 00h 36m 21s
Error while running ssh/sudo command:
set -e
pushd /data/src/github.com/openshift//origin-aggregated-logging/hack/testing >/dev/null
export PATH=$GOPATH/bin:$PATH
echo '***************************************************'
echo 'Running GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh...'
time GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh
echo 'Finished GIT_URL=https://github.com/openshift/origin-aggregated-logging GIT_BRANCH=master O_A_L_DIR=/data/src/github.com/openshift/origin-aggregated-logging OS_ROOT=/data/src/github.com/openshift/origin ENABLE_OPS_CLUSTER=true USE_LOCAL_SOURCE=true TEST_PERF=false VERBOSE=1 OS_ANSIBLE_REPO=https://github.com/openshift/openshift-ansible OS_ANSIBLE_BRANCH=master ./logging.sh'
echo '***************************************************'
popd >/dev/null
The SSH command responded with a non-zero exit status. Vagrant
assumes that this means the command failed. The output for this command
should be in the log above. Please read the output to determine what
went wrong.
==> openshiftdev: Downloading logs
==> openshiftdev: Downloading artifacts from '/var/log/yum.log' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace@2/origin/artifacts/yum.log'
==> openshiftdev: Downloading artifacts from '/var/log/secure' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace@2/origin/artifacts/secure'
==> openshiftdev: Downloading artifacts from '/var/log/audit/audit.log' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace@2/origin/artifacts/audit.log'
==> openshiftdev: Downloading artifacts from '/tmp/origin-aggregated-logging/' to '/var/lib/jenkins/jobs/test-origin-aggregated-logging/workspace@2/origin/artifacts'
Build step 'Execute shell' marked build as failure
[description-setter] Could not determine description.
[PostBuildScript] - Execution post build scripts.
[workspace@2] $ /bin/sh -xe /tmp/hudson7868545772202054506.sh
+ INSTANCE_NAME=origin_logging-rhel7-1636
+ pushd origin
~/jobs/test-origin-aggregated-logging/workspace@2/origin ~/jobs/test-origin-aggregated-logging/workspace@2
+ rc=0
+ '[' -f .vagrant-openshift.json ']'
++ /usr/bin/vagrant ssh -c 'sudo ausearch -m avc'
+ ausearchresult='<no matches>'
+ rc=1
+ '[' '<no matches>' = '<no matches>' ']'
+ rc=0
+ /usr/bin/vagrant destroy -f
==> openshiftdev: Terminating the instance...
==> openshiftdev: Running cleanup tasks for 'shell' provisioner...
+ popd
~/jobs/test-origin-aggregated-logging/workspace@2
+ exit 0
[BFA] Scanning build for known causes...
[BFA] Found failure cause(s):
[BFA] Command Failure from category failure
[BFA] Done. 1s
Finished: FAILURE