FailedConsole Output

Skipping 6,148 KB.. Full Log
    		I0523 09:29:08.219751    1146 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (94.319231ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56562]
    		I0523 09:29:08.245191    1146 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (274.039687ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56562]
    		I0523 09:29:08.264253    1146 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (183.101575ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:56562]
    		INFO: 2018/05/23 09:29:08 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc4383c44e0
    		INFO: 2018/05/23 09:29:08 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:08 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:08 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:08 balancerWrapper: got update addr from Notify: [{127.0.0.1:13015 <nil>}]
    		INFO: 2018/05/23 09:29:08 ccBalancerWrapper: new subconn: [{127.0.0.1:13015 0  <nil>}]
    		INFO: 2018/05/23 09:29:08 balancerWrapper: handle subconn state change: 0xc42f353230, CONNECTING
    		INFO: 2018/05/23 09:29:08 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc4383c44e0
    		I0523 09:29:08.278024    1146 wrap.go:42] GET /apis/authentication.k8s.io/v1?timeout=32s: (75.322554ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56562]
    		I0523 09:29:08.284030    1146 wrap.go:42] GET /api/v1/namespaces/default: (12.035156ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56882]
    		I0523 09:29:08.301901    1146 wrap.go:42] GET /api/v1/namespaces/kube-system/configmaps/kube-controller-manager: (18.524789ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:56562]
    		I0523 09:29:08.318400    1146 get.go:238] Starting watch for /api/v1/namespaces/openshift-infra/secrets, rv=346 labels= fields=type=kubernetes.io/service-account-token timeout=1h36m36.692675557s
    		I0523 09:29:08.326932    1146 wrap.go:42] GET /api/v1/namespaces/default/services/kubernetes: (24.492995ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56882]
    		I0523 09:29:08.336609    1146 wrap.go:42] GET /apis/authentication.k8s.io/v1beta1?timeout=32s: (2.281551ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:56562]
    		INFO: 2018/05/23 09:29:08 balancerWrapper: handle subconn state change: 0xc42f353230, READY
    		INFO: 2018/05/23 09:29:08 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc4383c44e0
    		INFO: 2018/05/23 09:29:08 balancerWrapper: got update addr from Notify: [{127.0.0.1:13015 <nil>}]
    		
    --- PASS: TestIntegration/TestApiGroups (35.50s)
    	runner_test.go:187: 
    			master_routes_test.go:430: Looking for build api group in server group discovery
    			master_routes_test.go:445: Looking for builds resource in resource discovery
    			master_routes_test.go:471: Creating test namespace "testapigroup706774922"
    			master_routes_test.go:478: GETting builds
    			master_routes_test.go:489: Creating a Build
    			master_routes_test.go:496: GETting builds again
    		
    		=== OUTPUT
    		1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.000537    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (2.161255ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.001740    1155 get.go:238] Starting watch for /api/v1/namespaces/openshift-infra/secrets, rv=322 labels= fields=type=kubernetes.io/service-account-token timeout=1h47m34.608944768s
    		I0523 09:29:09.013510    1155 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (7.267509ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.023609    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/build-config-change-controller-token-n5znq: (7.975889ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.039855    1155 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/build-config-change-controller: (25.240943ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.040365    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/build-config-change-controller: (16.206861ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.041228    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (27.161308ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.051030    1155 client_builder.go:233] Verified credential for build-config-change-controller/openshift-infra
    		I0523 09:29:09.051632    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&resourceVersion=322&watch=true: (50.322835ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.053315    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/build-config-change-controller: (1.404811ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.085792    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (32.05536ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.087498    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (774.957µs) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.087748    1155 client_builder.go:233] Verified credential for build-config-change-controller/openshift-infra
    		I0523 09:29:09.087949    1155 controller_manager.go:201] Started "openshift.io/build-config-change"
    		I0523 09:29:09.087965    1155 controller_manager.go:191] Starting "openshift.io/deploymentconfig"
    		I0523 09:29:09.089440    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/deploymentconfig-controller: (1.262137ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.091041    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra: (1.187334ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.093804    1155 wrap.go:42] POST /api/v1/namespaces/openshift-infra/serviceaccounts: (2.286438ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.129312    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (16.580809ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.129568    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (16.35706ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.129754    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (17.604359ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.129966    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (24.657559ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130094    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (25.276473ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130220    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (25.865233ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130254    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (34.988837ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130402    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (26.544192ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130521    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/deploymentconfig-controller: (35.790295ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.130553    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (24.787347ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.130660    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (27.36007ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.132396    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.661737ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.133239    1155 get.go:238] Starting watch for /api/v1/namespaces/openshift-infra/secrets, rv=325 labels= fields=type=kubernetes.io/service-account-token timeout=1h26m44.730891648s
    		I0523 09:29:09.149721    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (8.413114ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.149958    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (10.768609ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.150417    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (10.085057ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.155959    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (15.148992ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.156154    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (16.345052ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.161003    1155 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (12.050707ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.164658    1155 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (22.904946ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.165815    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/deploymentconfig-controller-token-t5sv5: (1.2926ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.168055    1155 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/deploymentconfig-controller: (5.492268ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.176367    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/deploymentconfig-controller: (10.106791ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.177514    1155 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin?timeout=5m0s: (3.573371ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.179069    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (16.602269ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.179421    1155 client_builder.go:233] Verified credential for deploymentconfig-controller/openshift-infra
    		I0523 09:29:09.181346    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&resourceVersion=325&watch=true: (48.586709ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.193567    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/deploymentconfig-controller: (12.159771ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.193824    1155 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit?timeout=5m0s: (13.708999ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.213858    1155 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view?timeout=5m0s: (16.920327ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.214287    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (16.675476ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.215959    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (802.581µs) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.216224    1155 client_builder.go:233] Verified credential for deploymentconfig-controller/openshift-infra
    		I0523 09:29:09.216481    1155 controller_manager.go:201] Started "openshift.io/deploymentconfig"
    		I0523 09:29:09.216496    1155 controller_manager.go:191] Starting "openshift.io/image-trigger"
    		I0523 09:29:09.216642    1155 factory.go:79] Starting deploymentconfig controller
    		I0523 09:29:09.218124    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (1.367733ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.218426    1155 wrap.go:42] GET /api?timeout=5m0s: (607.146µs) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.219758    1155 wrap.go:42] GET /apis?timeout=5m0s: (946.483µs) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.219816    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra: (1.187094ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.221303    1155 wrap.go:42] GET /apis/build.openshift.io/v1?timeout=5m0s: (611.073µs) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.223621    1155 wrap.go:42] POST /api/v1/namespaces/openshift-infra/serviceaccounts: (3.264122ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.244252    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (18.553751ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.244564    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (19.188202ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.244982    1155 wrap.go:42] POST /api/v1/namespaces?timeout=5m0s: (20.95222ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.253037    1155 get.go:238] Starting watch for /api/v1/namespaces/openshift-infra/secrets, rv=329 labels= fields=type=kubernetes.io/service-account-token timeout=1h27m57.654163388s
    		I0523 09:29:09.253618    1155 wrap.go:42] GET /apis/build.openshift.io/v1: (474.768µs) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.263635    1155 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (9.86079ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.267684    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/image-trigger-controller-token-ctr62: (1.477244ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.269216    1155 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (4.043442ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40810]
    		I0523 09:29:09.285735    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (17.523332ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.286463    1155 wrap.go:42] PUT /api/v1/namespaces/testapigroup706774922/finalize: (21.834316ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.287221    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (22.562883ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.289757    1155 wrap.go:42] POST /apis/authorization.k8s.io/v1/subjectaccessreviews: (1.726433ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.290525    1155 controller.go:537] quota admission added evaluator for: {build.openshift.io builds}
    		I0523 09:29:09.290961    1155 controller.go:537] quota admission added evaluator for: {build.openshift.io builds}
    		I0523 09:29:09.291201    1155 client_builder.go:233] Verified credential for image-trigger-controller/openshift-infra
    		I0523 09:29:09.292547    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&resourceVersion=329&watch=true: (40.1647ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.293777    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (1.173996ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.293782    1155 wrap.go:42] GET /api/v1/namespaces/testapigroup706774922/resourcequotas: (1.984133ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.314212    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (19.921486ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.314790    1155 wrap.go:42] POST /apis/build.openshift.io/v1/namespaces/testapigroup706774922/builds?timeout=5m0s: (51.819248ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.317070    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (810.317µs) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.318686    1155 client_builder.go:233] Verified credential for image-trigger-controller/openshift-infra
    		I0523 09:29:09.331660    1155 wrap.go:42] GET /apis/build.openshift.io/v1/namespaces/testapigroup706774922/builds/foo: (14.322961ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.332069    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (13.127935ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.335640    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets?fieldSelector=type%3Dkubernetes.io%2Fservice-account-token&limit=500: (2.352182ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.337248    1155 wrap.go:42] DELETE /api/v1/namespaces/testapigroup706774922?timeout=5m0s: (3.934343ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:41336]
    		I0523 09:29:09.337248    1155 wrap.go:42] POST /apis/authentication.k8s.io/v1/tokenreviews: (679.248µs) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		I0523 09:29:09.337721    1155 client_builder.go:233] Verified credential for image-trigger-controller/openshift-infra
    		INFO: 2018/05/23 09:29:09 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc435963aa0
    		INFO: 2018/05/23 09:29:09 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:09 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:09 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:09 balancerWrapper: got update addr from Notify: [{127.0.0.1:13648 <nil>}]
    		INFO: 2018/05/23 09:29:09 ccBalancerWrapper: new subconn: [{127.0.0.1:13648 0  <nil>}]
    		I0523 09:29:09.338855    1155 controller_manager.go:201] Started "openshift.io/image-trigger"
    		I0523 09:29:09.338915    1155 controller_manager.go:191] Starting "openshift.io/sdn"
    		W0523 09:29:09.338927    1155 controller_manager.go:198] Skipping "openshift.io/sdn"
    		I0523 09:29:09.338935    1155 controller_manager.go:191] Starting "openshift.io/resourcequota"
    		I0523 09:29:09.339075    1155 image_trigger_controller.go:215] Starting trigger controller
    		INFO: 2018/05/23 09:29:09 balancerWrapper: handle subconn state change: 0xc42f06bac0, CONNECTING
    		INFO: 2018/05/23 09:29:09 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc435963aa0
    		I0523 09:29:09.356401    1155 wrap.go:42] GET /api/v1/namespaces/openshift-infra/serviceaccounts/resourcequota-controller: (17.09738ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40810]
    		INFO: 2018/05/23 09:29:09 balancerWrapper: handle subconn state change: 0xc42f06bac0, READY
    		INFO: 2018/05/23 09:29:09 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc435963aa0
    		
    --- PASS: TestIntegration/TestAlwaysPullImagesOn (46.17s)
    	runner_test.go:187: 
    		
    		=== OUTPUT
    		23 09:29:13.647927    1137 wrap.go:42] POST /api/v1/namespaces/openshift-node/secrets: (11.761471ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.651026    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.563117ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.651209    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (2.500236ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.671976    1137 wrap.go:42] PUT /api/v1/namespaces/openshift-node/serviceaccounts/default: (11.695112ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.705245    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (8.515654ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.705487    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (4.503135ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.705662    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (9.522421ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.705865    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (8.638441ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.706571    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (6.055228ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707032    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (8.84465ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707223    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (9.527326ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707401    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (7.336615ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707563    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (7.959604ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707709    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (8.558055ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.707851    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (9.197648ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:13.709202    1137 wrap.go:42] POST /api/v1/namespaces/integration/secrets: (14.797719ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.745266    1137 wrap.go:42] PUT /api/v1/namespaces/integration/serviceaccounts/default: (10.650701ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.765999    1137 request.go:485] Throttling request took 55.327068ms, request: POST:https://127.0.0.1:21197/api/v1/namespaces/integration/secrets
    		I0523 09:29:13.783034    1137 wrap.go:42] POST /api/v1/namespaces/integration/secrets: (5.820387ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.851319    1137 wrap.go:42] PUT /api/v1/namespaces/integration/serviceaccounts/builder: (42.750764ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.852391    1137 wrap.go:42] POST /api/v1/namespaces/integration/secrets: (30.41848ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:13.859793    1137 wrap.go:42] PUT /api/v1/namespaces/integration/serviceaccounts/deployer: (5.618669ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:36374]
    		I0523 09:29:14.202896    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.706069ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.203123    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (2.79929ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.401309    1137 wrap.go:42] GET /api/v1/namespaces/integration/serviceaccounts/default?timeout=5m0s: (1.9332ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		I0523 09:29:14.403553    1137 wrap.go:42] GET /api/v1/namespaces/integration/secrets/default-token-fl74g?timeout=5m0s: (1.528174ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		I0523 09:29:14.405542    1137 wrap.go:42] GET /api/v1/namespaces/integration/secrets/default-dockercfg-wqtkk?timeout=5m0s: (1.195185ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		I0523 09:29:14.408777    1137 wrap.go:42] GET /api/v1/namespaces/integration/limitranges: (1.370703ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.409151    1137 admission.go:97] getting security context constraints for pod  (generate: test) in namespace integration with user info &{system:admin  [system:masters system:cluster-admins system:authenticated] map[]}
    		I0523 09:29:14.409233    1137 admission.go:108] getting security context constraints for pod  (generate: test) with service account info &{system:serviceaccount:integration:default  [system:serviceaccounts system:serviceaccounts:integration] map[]}
    		I0523 09:29:14.413068    1137 wrap.go:42] GET /api/v1/namespaces/integration: (1.39459ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.413318    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413364    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.413373    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413382    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.413400    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413418    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413436    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.413444    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413452    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.413465    1137 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.413486    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.413494    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.413502    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.413522    1137 admission.go:217] validating pod  (generate: test) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:14.413614    1137 admission.go:170] pod  (generate: test) validated against provider anyuid
    		I0523 09:29:14.420693    1137 wrap.go:42] POST /api/v1/namespaces/integration/pods?timeout=5m0s: (14.358591ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		I0523 09:29:14.422505    1137 disruption.go:328] addPod called on pod "test2vq8d"
    		I0523 09:29:14.422523    1137 disruption.go:403] No PodDisruptionBudgets found for pod test2vq8d, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:14.422531    1137 disruption.go:331] No matching pdb for pod "test2vq8d"
    		I0523 09:29:14.422741    1137 factory.go:1147] About to try and schedule pod test2vq8d
    		I0523 09:29:14.422754    1137 scheduler.go:439] Attempting to schedule pod: integration/test2vq8d
    		I0523 09:29:14.422776    1137 scheduler.go:191] Failed to schedule pod: integration/test2vq8d
    		I0523 09:29:14.422833    1137 factory.go:1262] Unable to schedule integration test2vq8d: no nodes are registered to the cluster; waiting
    		I0523 09:29:14.422891    1137 factory.go:1375] Updating pod condition for integration/test2vq8d to (PodScheduled==False)
    		I0523 09:29:14.423150    1137 pvc_protection_controller.go:276] Got event on pod integration/test2vq8d
    		I0523 09:29:14.423272    1137 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"integration", Name:"test2vq8d"}
    		I0523 09:29:14.423374    1137 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:14.430203    1137 wrap.go:42] PUT /api/v1/namespaces/integration/pods/test2vq8d/status: (6.440091ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:36078]
    		I0523 09:29:14.431923    1137 disruption.go:340] updatePod called on pod "test2vq8d"
    		I0523 09:29:14.431950    1137 disruption.go:403] No PodDisruptionBudgets found for pod test2vq8d, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:14.431959    1137 disruption.go:343] No matching pdb for pod "test2vq8d"
    		I0523 09:29:14.432147    1137 pvc_protection_controller.go:276] Got event on pod integration/test2vq8d
    		I0523 09:29:14.432767    1137 factory.go:1147] About to try and schedule pod test2vq8d
    		I0523 09:29:14.432781    1137 scheduler.go:439] Attempting to schedule pod: integration/test2vq8d
    		I0523 09:29:14.432801    1137 scheduler.go:191] Failed to schedule pod: integration/test2vq8d
    		I0523 09:29:14.432823    1137 factory.go:1262] Unable to schedule integration test2vq8d: no nodes are registered to the cluster; waiting
    		I0523 09:29:14.432884    1137 factory.go:1375] Updating pod condition for integration/test2vq8d to (PodScheduled==False)
    		W0523 09:29:14.432936    1137 factory.go:1304] Request for pod integration/test2vq8d already in flight, abandoning
    		I0523 09:29:14.445232    1137 wrap.go:42] POST /api/v1/namespaces/integration/events: (20.14466ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:36078]
    		I0523 09:29:14.445685    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (2.615171ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.446170    1137 factory.go:1147] About to try and schedule pod test2vq8d
    		I0523 09:29:14.446205    1137 scheduler.go:435] Skip schedule deleting pod: integration/test2vq8d
    		I0523 09:29:14.446981    1137 disruption.go:340] updatePod called on pod "test2vq8d"
    		I0523 09:29:14.447006    1137 disruption.go:403] No PodDisruptionBudgets found for pod test2vq8d, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:14.447015    1137 disruption.go:343] No matching pdb for pod "test2vq8d"
    		I0523 09:29:14.447034    1137 pvc_protection_controller.go:276] Got event on pod integration/test2vq8d
    		I0523 09:29:14.447098    1137 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test2vq8d, uid c200b8e7-5e6b-11e8-9194-0242ac110002, event type update
    		I0523 09:29:14.447203    1137 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test2vq8d, uid c200b8e7-5e6b-11e8-9194-0242ac110002, event type update
    		I0523 09:29:14.473433    1137 wrap.go:42] DELETE /api/v1/namespaces/integration/pods/test2vq8d?timeout=5m0s: (47.09279ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		I0523 09:29:14.474114    1137 disruption.go:369] deletePod called on pod "test2vq8d"
    		I0523 09:29:14.474140    1137 disruption.go:403] No PodDisruptionBudgets found for pod test2vq8d, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:14.474148    1137 disruption.go:372] No matching pdb for pod "test2vq8d"
    		I0523 09:29:14.474199    1137 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test2vq8d, uid c200b8e7-5e6b-11e8-9194-0242ac110002, event type delete
    		I0523 09:29:14.474246    1137 pvc_protection_controller.go:276] Got event on pod integration/test2vq8d
    		I0523 09:29:14.474296    1137 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test2vq8d, uid c200b8e7-5e6b-11e8-9194-0242ac110002, event type delete
    		I0523 09:29:14.474341    1137 deployment_controller.go:357] Pod test2vq8d deleted.
    		I0523 09:29:14.474396    1137 taint_manager.go:338] Noticed pod deletion: types.NamespacedName{Namespace:"integration", Name:"test2vq8d"}
    		I0523 09:29:14.475186    1137 admission.go:97] getting security context constraints for pod  (generate: test) in namespace integration with user info &{system:admin  [system:masters system:cluster-admins system:authenticated] map[]}
    		I0523 09:29:14.475240    1137 admission.go:108] getting security context constraints for pod  (generate: test) with service account info &{system:serviceaccount:integration:default  [system:serviceaccounts system:serviceaccounts:integration] map[]}
    		I0523 09:29:14.483195    1137 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (11.060823ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.483353    1137 wrap.go:42] GET /api/v1/namespaces/integration: (2.247026ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36078]
    		I0523 09:29:14.483650    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483692    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.483702    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483712    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.483732    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483748    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483764    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.483772    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483780    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.483793    1137 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.483814    1137 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:14.483824    1137 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:14.483831    1137 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:14.483859    1137 admission.go:217] validating pod  (generate: test) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:14.483931    1137 wrap.go:42] PATCH /api/v1/namespaces/integration/events/test2vq8d.15313c082a53feb4: (36.541609ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:36078]
    		I0523 09:29:14.483960    1137 admission.go:170] pod  (generate: test) validated against provider anyuid
    		I0523 09:29:14.486420    1137 wrap.go:42] POST /api/v1/namespaces/integration/pods?timeout=5m0s: (12.092716ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:36392]
    		INFO: 2018/05/23 09:29:14 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc43428f740
    		INFO: 2018/05/23 09:29:14 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:14 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:14 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:14 balancerWrapper: got update addr from Notify: [{127.0.0.1:18910 <nil>}]
    		INFO: 2018/05/23 09:29:14 ccBalancerWrapper: new subconn: [{127.0.0.1:18910 0  <nil>}]
    		I0523 09:29:14.498461    1137 disruption.go:328] addPod called on pod "test6n6mq"
    		I0523 09:29:14.498480    1137 disruption.go:403] No PodDisruptionBudgets found for pod test6n6mq, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:14.498488    1137 disruption.go:331] No matching pdb for pod "test6n6mq"
    		I0523 09:29:14.498648    1137 factory.go:1147] About to try and schedule pod test6n6mq
    		I0523 09:29:14.498660    1137 scheduler.go:439] Attempting to schedule pod: integration/test6n6mq
    		I0523 09:29:14.498681    1137 scheduler.go:191] Failed to schedule pod: integration/test6n6mq
    		I0523 09:29:14.498702    1137 factory.go:1262] Unable to schedule integration test6n6mq: no nodes are registered to the cluster; waiting
    		I0523 09:29:14.498737    1137 factory.go:1375] Updating pod condition for integration/test6n6mq to (PodScheduled==False)
    		INFO: 2018/05/23 09:29:14 balancerWrapper: handle subconn state change: 0xc42b41e180, CONNECTING
    		INFO: 2018/05/23 09:29:14 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc43428f740
    		I0523 09:29:14.499059    1137 pvc_protection_controller.go:276] Got event on pod integration/test6n6mq
    		I0523 09:29:14.499150    1137 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"integration", Name:"test6n6mq"}
    		I0523 09:29:14.499232    1137 backoff_utils.go:79] Backing off 1s
    		INFO: 2018/05/23 09:29:14 balancerWrapper: handle subconn state change: 0xc42b41e180, READY
    		INFO: 2018/05/23 09:29:14 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc43428f740
    		
    --- PASS: TestIntegration/TestAggregator (36.04s)
    	runner_test.go:187: 
    		INFO: 2018/05/23 09:29:44 balancerWrapper: got update addr from Notify: [{127.0.0.1:27728 <nil>}]
    		
    		=== OUTPUT
    		/secrets/service-account-controller-token-296nz: (9.277076ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.212213    1186 wrap.go:42] GET /api/v1/namespaces/kube-system/secrets/statefulset-controller-token-jqfzg: (2.081344ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.218799    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/pvc-protection-controller-token-pzrlf: (12.588326ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.225362    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/replication-controller-token-7whp5: (15.992342ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.225685    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/resourcequota-controller-token-446d4: (15.614483ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.227417    1186 wrap.go:42] GET /api/v1/namespaces/kube-system/secrets/service-controller-token-rlj2c: (7.184844ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.229042    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/image-trigger-controller: (5.835837ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.229242    1186 create_dockercfg_secrets.go:441] Creating token secret "image-trigger-controller-token-c8nrl" for service account openshift-infra/image-trigger-controller
    		I0523 09:29:44.232147    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-public/rolebindings: (153.317834ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.232785    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/service-account-controller-token-296nz: (15.312506ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.232966    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/statefulset-controller-token-jqfzg: (14.610945ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.233780    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/cluster-quota-reconciliation-controller-token-sb2ml: (3.97951ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.234582    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/build-controller-token-x2dpv: (4.222991ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.238626    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/default-token-4b5gz: (1.520403ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.238915    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/openshift/rolebindings: (135.247349ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.239491    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/build-config-change-controller-token-w4mbt: (2.904297ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.245001    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/secrets/service-controller-token-rlj2c: (10.578432ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.247990    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/rolebindings: (150.004265ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.250388    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/openshift-infra/rolebindings: (153.021036ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.252740    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/build-controller-token-x2dpv: (8.148156ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.252990    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/default-token-4b5gz: (7.637759ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.255481    1186 request.go:485] Throttling request took 65.514174ms, request: PUT:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/serviceaccounts/ingress-to-route-controller
    		I0523 09:29:44.257044    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/build-config-change-controller-token-w4mbt: (10.816267ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.258701    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/horizontal-pod-autoscaler-token-sbxq4: (5.381414ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.258999    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/default-rolebindings-controller-token-px2x4: (7.854114ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.259536    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/cluster-quota-reconciliation-controller-token-sb2ml: (11.061073ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.260711    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/deployer-controller-token-qxlsx: (3.10277ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.260996    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/deploymentconfig-controller-token-b2zq7: (7.1984ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.263769    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/ingress-to-route-controller: (7.990485ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.265894    1186 create_dockercfg_secrets.go:441] Creating token secret "ingress-to-route-controller-token-c8w4v" for service account openshift-infra/ingress-to-route-controller
    		I0523 09:29:44.270610    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/horizontal-pod-autoscaler-token-sbxq4: (5.165699ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.272592    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/default-rolebindings-controller-token-px2x4: (5.67833ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.274115    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/deployer-controller-token-qxlsx: (6.659334ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.276993    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/deploymentconfig-controller-token-b2zq7: (7.11592ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.279776    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/openshift-node/rolebindings: (78.344919ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.287228    1186 request.go:485] Throttling request took 93.288413ms, request: PUT:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/serviceaccounts/namespace-security-allocation-controller
    		I0523 09:29:44.290383    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/namespace-security-allocation-controller: (2.825589ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.290656    1186 create_dockercfg_secrets.go:441] Creating token secret "namespace-security-allocation-controller-token-9wjfz" for service account openshift-infra/namespace-security-allocation-controller
    		I0523 09:29:44.297625    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/openshift-node/rolebindings: (17.27815ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.316583    1186 wrap.go:42] POST /apis/rbac.authorization.k8s.io/v1/namespaces/openshift-node/rolebindings: (18.444734ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:default-rolebindings-controller] 127.0.0.1:40860]
    		I0523 09:29:44.319489    1186 request.go:485] Throttling request took 125.427216ms, request: POST:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/secrets
    		I0523 09:29:44.321960    1186 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (2.120427ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.323337    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/image-import-controller-token-lsl7j: (1.102273ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.330647    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/image-import-controller-token-lsl7j: (3.119872ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.351740    1186 request.go:485] Throttling request took 150.896016ms, request: PUT:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/serviceaccounts/node-bootstrapper
    		I0523 09:29:44.354804    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/node-bootstrapper: (2.687308ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.355098    1186 create_dockercfg_secrets.go:441] Creating token secret "node-bootstrapper-token-jhts5" for service account openshift-infra/node-bootstrapper
    		I0523 09:29:44.383995    1186 request.go:485] Throttling request took 154.685912ms, request: POST:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/secrets
    		I0523 09:29:44.386712    1186 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (2.387839ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.388114    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/image-trigger-controller-token-c8nrl: (1.122158ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.395512    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/image-trigger-controller-token-c8nrl: (3.050208ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.416271    1186 request.go:485] Throttling request took 150.306802ms, request: POST:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/secrets
    		I0523 09:29:44.418950    1186 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (2.274361ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.420611    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/ingress-to-route-controller-token-c8w4v: (1.27604ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.429254    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/ingress-to-route-controller-token-c8w4v: (3.283625ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.448404    1186 request.go:485] Throttling request took 157.687854ms, request: POST:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/secrets
    		I0523 09:29:44.451223    1186 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (2.423509ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.453235    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/namespace-security-allocation-controller-token-9wjfz: (1.205349ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.461127    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/namespace-security-allocation-controller-token-9wjfz: (3.414959ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.463015    1186 wrap.go:42] GET /api/v1/namespaces/kube-system/configmaps/kube-scheduler: (2.755009ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:40416]
    		I0523 09:29:44.466295    1186 wrap.go:42] PUT /api/v1/namespaces/kube-system/configmaps/kube-scheduler: (2.681166ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:40416]
    		I0523 09:29:44.466547    1186 leaderelection.go:199] successfully renewed lease kube-system/kube-scheduler
    		I0523 09:29:44.480776    1186 request.go:485] Throttling request took 158.498795ms, request: PUT:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/serviceaccounts/origin-namespace-controller
    		I0523 09:29:44.484020    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/serviceaccounts/origin-namespace-controller: (2.819487ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.484288    1186 create_dockercfg_secrets.go:441] Creating token secret "origin-namespace-controller-token-g4gg7" for service account openshift-infra/origin-namespace-controller
    		I0523 09:29:44.495440    1186 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/admin?timeout=5m0s: (2.00636ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.497916    1186 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/edit?timeout=5m0s: (1.574391ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.500053    1186 wrap.go:42] GET /apis/rbac.authorization.k8s.io/v1/clusterroles/view?timeout=5m0s: (1.393306ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.505174    1186 wrap.go:42] GET /api/v1/namespaces/default?timeout=5m0s: (1.864041ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.507938    1186 wrap.go:42] GET /api/v1/namespaces/default: (1.429662ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40416]
    		I0523 09:29:44.508578    1186 wrap.go:42] GET /apis/project.openshift.io/v1/projects/default?timeout=5m0s: (2.7756ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.510830    1186 wrap.go:42] GET /api/v1/namespaces/default: (1.051611ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40416]
    		I0523 09:29:44.511295    1186 wrap.go:42] GET /apis/project.openshift.io/v1/projects/default?timeout=5m0s: (2.006842ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.512971    1186 request.go:485] Throttling request took 157.807686ms, request: POST:https://127.0.0.1:22772/api/v1/namespaces/openshift-infra/secrets
    		I0523 09:29:44.513613    1186 wrap.go:42] GET /apis/apiregistration.k8s.io/v1beta1/apiservices/v1.?timeout=5m0s: (1.825653ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.515603    1186 wrap.go:42] POST /api/v1/namespaces/openshift-infra/secrets: (2.278539ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/system:serviceaccount:openshift-infra:serviceaccount-pull-secrets-controller] 127.0.0.1:40860]
    		I0523 09:29:44.517290    1186 wrap.go:42] GET /apis/apiregistration.k8s.io/v1beta1/apiservices/v1.project.openshift.io?timeout=5m0s: (3.068143ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		I0523 09:29:44.517355    1186 wrap.go:42] GET /api/v1/namespaces/openshift-infra/secrets/node-bootstrapper-token-jhts5: (1.115228ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		I0523 09:29:44.518980    1186 wrap.go:42] GET /apis/apiregistration.k8s.io/v1beta1/apiservices/v1beta1.rbac.authorization.k8s.io?timeout=5m0s: (1.162826ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:40858]
    		INFO: 2018/05/23 09:29:44 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc435833e60
    		INFO: 2018/05/23 09:29:44 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:44 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:44 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:44 balancerWrapper: got update addr from Notify: [{127.0.0.1:27728 <nil>}]
    		INFO: 2018/05/23 09:29:44 ccBalancerWrapper: new subconn: [{127.0.0.1:27728 0  <nil>}]
    		INFO: 2018/05/23 09:29:44 balancerWrapper: handle subconn state change: 0xc42fc6d1a0, CONNECTING
    		INFO: 2018/05/23 09:29:44 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc435833e60
    		I0523 09:29:44.528950    1186 wrap.go:42] PUT /api/v1/namespaces/openshift-infra/secrets/node-bootstrapper-token-jhts5: (6.530997ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/tokens-controller] 127.0.0.1:40416]
    		INFO: 2018/05/23 09:29:44 balancerWrapper: handle subconn state change: 0xc42fc6d1a0, READY
    		INFO: 2018/05/23 09:29:44 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc435833e60
    		
    --- PASS: TestIntegration/TestAllowedSCCViaRBAC (42.79s)
    	runner_test.go:187: 
    		
    		=== OUTPUT
    		mespace project1
    		I0523 09:29:47.829005    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.829014    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.829041    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.829066    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.829108    1173 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace project1
    		I0523 09:29:47.829120    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.829129    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.829142    1173 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.829165    1173 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace project1
    		I0523 09:29:47.829178    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.829186    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.829212    1173 admission.go:217] validating pod test3 (generate: ) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:47.829382    1173 admission.go:170] pod test3 (generate: ) validated against provider hostaccess
    		I0523 09:29:47.831435    1173 wrap.go:42] POST /api/v1/namespaces/project1/pods?timeout=5m0s: (7.28647ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		I0523 09:29:47.832131    1173 factory.go:1147] About to try and schedule pod test3
    		I0523 09:29:47.832158    1173 scheduler.go:439] Attempting to schedule pod: project1/test3
    		I0523 09:29:47.832182    1173 scheduler.go:191] Failed to schedule pod: project1/test3
    		I0523 09:29:47.832261    1173 factory.go:1262] Unable to schedule project1 test3: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.832335    1173 factory.go:1375] Updating pod condition for project1/test3 to (PodScheduled==False)
    		I0523 09:29:47.832356    1173 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"project1", Name:"test3"}
    		I0523 09:29:47.832412    1173 pvc_protection_controller.go:276] Got event on pod project1/test3
    		I0523 09:29:47.832432    1173 disruption.go:328] addPod called on pod "test3"
    		I0523 09:29:47.832456    1173 disruption.go:403] No PodDisruptionBudgets found for pod test3, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.832460    1173 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:47.832470    1173 disruption.go:331] No matching pdb for pod "test3"
    		I0523 09:29:47.833647    1173 admission.go:97] getting security context constraints for pod test4 (generate: ) in namespace project2 with user info &{user1 d0bf287a-5e6b-11e8-bcb0-0242ac110002 [system:authenticated:oauth system:authenticated] map[scopes.authorization.openshift.io:[]]}
    		I0523 09:29:47.834403    1173 admission.go:108] getting security context constraints for pod test4 (generate: ) with service account info &{system:serviceaccount:project2:default  [system:serviceaccounts system:serviceaccounts:project2] map[]}
    		I0523 09:29:47.836379    1173 wrap.go:42] PUT /api/v1/namespaces/project1/pods/test3/status: (3.59655ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:55028]
    		I0523 09:29:47.836804    1173 factory.go:1147] About to try and schedule pod test3
    		I0523 09:29:47.836824    1173 scheduler.go:439] Attempting to schedule pod: project1/test3
    		I0523 09:29:47.836844    1173 scheduler.go:191] Failed to schedule pod: project1/test3
    		I0523 09:29:47.836863    1173 pvc_protection_controller.go:276] Got event on pod project1/test3
    		I0523 09:29:47.836893    1173 factory.go:1262] Unable to schedule project1 test3: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.836915    1173 disruption.go:340] updatePod called on pod "test3"
    		I0523 09:29:47.836933    1173 disruption.go:403] No PodDisruptionBudgets found for pod test3, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.836934    1173 factory.go:1375] Updating pod condition for project1/test3 to (PodScheduled==False)
    		I0523 09:29:47.836938    1173 disruption.go:343] No matching pdb for pod "test3"
    		W0523 09:29:47.836986    1173 factory.go:1304] Request for pod project1/test3 already in flight, abandoning
    		I0523 09:29:47.841208    1173 wrap.go:42] POST /api/v1/namespaces/project1/events: (8.240158ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		I0523 09:29:47.841535    1173 wrap.go:42] GET /api/v1/namespaces/project2: (6.156888ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55028]
    		I0523 09:29:47.841760    1173 matcher.go:279] got preallocated values for min: 1000070000, max: 1000079999 for uid range in namespace project2
    		I0523 09:29:47.841774    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.841783    1173 matcher.go:322] got preallocated value for groups: 1000070000/10000 in namespace project2
    		I0523 09:29:47.841802    1173 admission.go:217] validating pod test4 (generate: ) against providers restricted
    		I0523 09:29:47.841891    1173 admission.go:179] unable to validate pod test4 (generate: ) against any security context constraint: [provider restricted: .spec.securityContext.hostPID: Invalid value: true: Host PID is not allowed to be used spec.containers[0].securityContext.hostPID: Invalid value: true: Host PID is not allowed to be used]
    		I0523 09:29:47.842050    1173 wrap.go:42] POST /api/v1/namespaces/project2/pods?timeout=5m0s: (9.269641ms) 403 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		I0523 09:29:47.843241    1173 admission.go:97] getting security context constraints for pod test5 (generate: ) in namespace project1 with user info &{user2 d0de6228-5e6b-11e8-bcb0-0242ac110002 [system:authenticated:oauth system:authenticated] map[scopes.authorization.openshift.io:[]]}
    		I0523 09:29:47.843620    1173 admission.go:108] getting security context constraints for pod test5 (generate: ) with service account info &{system:serviceaccount:project1:default  [system:serviceaccounts system:serviceaccounts:project1] map[]}
    		I0523 09:29:47.846188    1173 wrap.go:42] GET /api/v1/namespaces/project1: (1.446515ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55028]
    		I0523 09:29:47.846453    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846487    1173 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace project1
    		I0523 09:29:47.846498    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846506    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.846523    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846534    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846550    1173 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace project1
    		I0523 09:29:47.846559    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846567    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.846580    1173 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.846629    1173 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace project1
    		I0523 09:29:47.846640    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.846647    1173 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace project1
    		I0523 09:29:47.846686    1173 admission.go:217] validating pod test5 (generate: ) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:47.846944    1173 admission.go:170] pod test5 (generate: ) validated against provider hostaccess
    		I0523 09:29:47.847333    1173 wrap.go:42] PATCH /api/v1/namespaces/project1/events/test3.15313c0ff1af8f98: (4.749087ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		I0523 09:29:47.849192    1173 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"project1", Name:"test5"}
    		I0523 09:29:47.849208    1173 wrap.go:42] POST /api/v1/namespaces/project1/pods?timeout=5m0s: (6.511268ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		I0523 09:29:47.849238    1173 factory.go:1147] About to try and schedule pod test5
    		I0523 09:29:47.849251    1173 pvc_protection_controller.go:276] Got event on pod project1/test5
    		I0523 09:29:47.849252    1173 scheduler.go:439] Attempting to schedule pod: project1/test5
    		I0523 09:29:47.849272    1173 disruption.go:328] addPod called on pod "test5"
    		I0523 09:29:47.849284    1173 disruption.go:403] No PodDisruptionBudgets found for pod test5, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.849292    1173 disruption.go:331] No matching pdb for pod "test5"
    		I0523 09:29:47.849273    1173 scheduler.go:191] Failed to schedule pod: project1/test5
    		I0523 09:29:47.849317    1173 factory.go:1262] Unable to schedule project1 test5: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.849340    1173 factory.go:1375] Updating pod condition for project1/test5 to (PodScheduled==False)
    		I0523 09:29:47.849449    1173 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:47.850890    1173 admission.go:97] getting security context constraints for pod test6 (generate: ) in namespace project2 with user info &{user2 d0de6228-5e6b-11e8-bcb0-0242ac110002 [system:authenticated:oauth system:authenticated] map[scopes.authorization.openshift.io:[]]}
    		I0523 09:29:47.851389    1173 admission.go:108] getting security context constraints for pod test6 (generate: ) with service account info &{system:serviceaccount:project2:default  [system:serviceaccounts system:serviceaccounts:project2] map[]}
    		I0523 09:29:47.852585    1173 wrap.go:42] PUT /api/v1/namespaces/project1/pods/test5/status: (2.878987ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:55028]
    		I0523 09:29:47.852644    1173 pvc_protection_controller.go:276] Got event on pod project1/test5
    		I0523 09:29:47.852676    1173 disruption.go:340] updatePod called on pod "test5"
    		I0523 09:29:47.852690    1173 disruption.go:403] No PodDisruptionBudgets found for pod test5, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.852703    1173 disruption.go:343] No matching pdb for pod "test5"
    		I0523 09:29:47.852961    1173 factory.go:1147] About to try and schedule pod test5
    		I0523 09:29:47.852977    1173 scheduler.go:439] Attempting to schedule pod: project1/test5
    		I0523 09:29:47.853014    1173 scheduler.go:191] Failed to schedule pod: project1/test5
    		I0523 09:29:47.853032    1173 factory.go:1262] Unable to schedule project1 test5: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.853052    1173 factory.go:1375] Updating pod condition for project1/test5 to (PodScheduled==False)
    		W0523 09:29:47.853092    1173 factory.go:1304] Request for pod project1/test5 already in flight, abandoning
    		I0523 09:29:47.853818    1173 wrap.go:42] POST /api/v1/namespaces/project1/events: (4.026766ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		I0523 09:29:47.854420    1173 wrap.go:42] GET /api/v1/namespaces/project2: (1.491425ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55028]
    		I0523 09:29:47.854661    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854700    1173 matcher.go:279] got preallocated values for min: 1000070000, max: 1000079999 for uid range in namespace project2
    		I0523 09:29:47.854708    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854717    1173 matcher.go:322] got preallocated value for groups: 1000070000/10000 in namespace project2
    		I0523 09:29:47.854735    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854770    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854790    1173 matcher.go:279] got preallocated values for min: 1000070000, max: 1000079999 for uid range in namespace project2
    		I0523 09:29:47.854807    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854813    1173 matcher.go:322] got preallocated value for groups: 1000070000/10000 in namespace project2
    		I0523 09:29:47.854822    1173 matcher.go:342] got preallocated value for groups: 1000070000/10000 in namespace project2
    		I0523 09:29:47.854841    1173 matcher.go:279] got preallocated values for min: 1000070000, max: 1000079999 for uid range in namespace project2
    		I0523 09:29:47.854848    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.854854    1173 matcher.go:322] got preallocated value for groups: 1000070000/10000 in namespace project2
    		I0523 09:29:47.854890    1173 admission.go:217] validating pod test6 (generate: ) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:47.855264    1173 admission.go:170] pod test6 (generate: ) validated against provider hostaccess
    		I0523 09:29:47.856957    1173 wrap.go:42] POST /api/v1/namespaces/project2/pods?timeout=5m0s: (6.835501ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		I0523 09:29:47.857271    1173 factory.go:1147] About to try and schedule pod test6
    		I0523 09:29:47.857285    1173 scheduler.go:439] Attempting to schedule pod: project2/test6
    		I0523 09:29:47.857300    1173 scheduler.go:191] Failed to schedule pod: project2/test6
    		I0523 09:29:47.857318    1173 factory.go:1262] Unable to schedule project2 test6: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.857343    1173 factory.go:1375] Updating pod condition for project2/test6 to (PodScheduled==False)
    		I0523 09:29:47.857712    1173 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:47.857785    1173 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"project2", Name:"test6"}
    		I0523 09:29:47.857807    1173 pvc_protection_controller.go:276] Got event on pod project2/test6
    		I0523 09:29:47.857821    1173 disruption.go:328] addPod called on pod "test6"
    		I0523 09:29:47.857830    1173 disruption.go:403] No PodDisruptionBudgets found for pod test6, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.857838    1173 disruption.go:331] No matching pdb for pod "test6"
    		I0523 09:29:47.859180    1173 controller.go:537] quota admission added evaluator for: {security.openshift.io podsecuritypolicyselfsubjectreviews}
    		I0523 09:29:47.860019    1173 wrap.go:42] PATCH /api/v1/namespaces/project1/events/test5.15313c0ff2b37cd6: (5.415616ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		I0523 09:29:47.860890    1173 wrap.go:42] PUT /api/v1/namespaces/project2/pods/test6/status: (3.241029ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:55028]
    		I0523 09:29:47.861910    1173 pvc_protection_controller.go:276] Got event on pod project2/test6
    		I0523 09:29:47.861924    1173 disruption.go:340] updatePod called on pod "test6"
    		I0523 09:29:47.861947    1173 disruption.go:403] No PodDisruptionBudgets found for pod test6, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:47.861969    1173 disruption.go:343] No matching pdb for pod "test6"
    		I0523 09:29:47.862045    1173 factory.go:1147] About to try and schedule pod test6
    		I0523 09:29:47.862062    1173 scheduler.go:439] Attempting to schedule pod: project2/test6
    		I0523 09:29:47.862085    1173 scheduler.go:191] Failed to schedule pod: project2/test6
    		I0523 09:29:47.862123    1173 factory.go:1262] Unable to schedule project2 test6: no nodes are registered to the cluster; waiting
    		I0523 09:29:47.862152    1173 factory.go:1375] Updating pod condition for project2/test6 to (PodScheduled==False)
    		W0523 09:29:47.862194    1173 factory.go:1304] Request for pod project2/test6 already in flight, abandoning
    		I0523 09:29:47.863159    1173 wrap.go:42] GET /api/v1/namespaces/project1: (2.731281ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55028]
    		I0523 09:29:47.863443    1173 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace project1
    		I0523 09:29:47.863810    1173 wrap.go:42] POST /apis/security.openshift.io/v1/namespaces/project1/podsecuritypolicyselfsubjectreviews?timeout=5m0s: (5.610117ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		I0523 09:29:47.864964    1173 wrap.go:42] POST /api/v1/namespaces/project2/events: (3.544911ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		I0523 09:29:47.867595    1173 wrap.go:42] GET /api/v1/namespaces/project2: (1.696328ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55028]
    		I0523 09:29:47.867816    1173 matcher.go:292] got preallocated value for level: s0:c8,c7 for selinux options in namespace project2
    		I0523 09:29:47.868010    1173 wrap.go:42] POST /apis/security.openshift.io/v1/namespaces/project2/podsecuritypolicyselfsubjectreviews?timeout=5m0s: (3.325879ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:55348]
    		INFO: 2018/05/23 09:29:47 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc42d4bc060
    		INFO: 2018/05/23 09:29:47 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:47 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:47 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:47 balancerWrapper: got update addr from Notify: [{127.0.0.1:17937 <nil>}]
    		INFO: 2018/05/23 09:29:47 ccBalancerWrapper: new subconn: [{127.0.0.1:17937 0  <nil>}]
    		INFO: 2018/05/23 09:29:47 balancerWrapper: handle subconn state change: 0xc42defb3d0, CONNECTING
    		INFO: 2018/05/23 09:29:47 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc42d4bc060
    		I0523 09:29:47.870165    1173 wrap.go:42] PATCH /api/v1/namespaces/project2/events/test6.15313c0ff32d95ba: (4.349389ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:55028]
    		INFO: 2018/05/23 09:29:47 balancerWrapper: handle subconn state change: 0xc42defb3d0, READY
    		INFO: 2018/05/23 09:29:47 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc42d4bc060
    		INFO: 2018/05/23 09:29:47 balancerWrapper: got update addr from Notify: [{127.0.0.1:17937 <nil>}]
    		
    --- PASS: TestIntegration/TestAlwaysPullImagesOff (39.19s)
    	runner_test.go:187: 
    		
    		=== OUTPUT
    		8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		I0523 09:29:48.537895    1195 leaderelection.go:199] successfully renewed lease kube-system/kube-controller-manager
    		I0523 09:29:48.544834    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.040921ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.547513    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (994.38µs) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.554103    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.880499ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.555351    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.218805ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.555551    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.473193ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.556803    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.533221ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.556920    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.240293ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557092    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.041489ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557117    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (4.408306ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557221    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.826566ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557287    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (4.597181ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.556809    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.021255ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557548    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.78495ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557574    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (2.434792ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.557747    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (3.21056ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.569487    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (983.469µs) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.575661    1195 wrap.go:42] GET /api/v1/namespaces/integration/serviceaccounts/default?timeout=5m0s: (1.149958ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.577788    1195 wrap.go:42] GET /api/v1/namespaces/integration/secrets/default-token-6wrps?timeout=5m0s: (1.448941ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.579810    1195 wrap.go:42] GET /api/v1/namespaces/integration/secrets/default-dockercfg-njjr4?timeout=5m0s: (1.396727ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.580202    1195 wrap.go:42] GET /api/v1/namespaces/openshift-web-console/configmaps/webconsole-config: (1.469822ms) 404 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.583063    1195 wrap.go:42] GET /api/v1/namespaces/integration/limitranges: (1.559273ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.583386    1195 admission.go:97] getting security context constraints for pod  (generate: test) in namespace integration with user info &{system:admin  [system:masters system:cluster-admins system:authenticated] map[]}
    		I0523 09:29:48.583508    1195 admission.go:108] getting security context constraints for pod  (generate: test) with service account info &{system:serviceaccount:integration:default  [system:serviceaccounts system:serviceaccounts:integration] map[]}
    		I0523 09:29:48.585674    1195 wrap.go:42] GET /api/v1/namespaces/integration: (1.163499ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.585956    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586008    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.586024    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586033    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.586062    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586083    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586109    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.586120    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586128    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.586144    1195 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.586168    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.586181    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.586189    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.586215    1195 admission.go:217] validating pod  (generate: test) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:48.586298    1195 admission.go:170] pod  (generate: test) validated against provider anyuid
    		I0523 09:29:48.588791    1195 wrap.go:42] POST /api/v1/namespaces/integration/pods?timeout=5m0s: (8.235245ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.589153    1195 factory.go:1147] About to try and schedule pod test7szzk
    		I0523 09:29:48.589173    1195 scheduler.go:439] Attempting to schedule pod: integration/test7szzk
    		I0523 09:29:48.589193    1195 scheduler.go:191] Failed to schedule pod: integration/test7szzk
    		I0523 09:29:48.589243    1195 factory.go:1262] Unable to schedule integration test7szzk: no nodes are registered to the cluster; waiting
    		I0523 09:29:48.589278    1195 factory.go:1375] Updating pod condition for integration/test7szzk to (PodScheduled==False)
    		I0523 09:29:48.589296    1195 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"integration", Name:"test7szzk"}
    		I0523 09:29:48.589360    1195 disruption.go:328] addPod called on pod "test7szzk"
    		I0523 09:29:48.589378    1195 disruption.go:403] No PodDisruptionBudgets found for pod test7szzk, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.589386    1195 disruption.go:331] No matching pdb for pod "test7szzk"
    		I0523 09:29:48.589419    1195 pvc_protection_controller.go:276] Got event on pod integration/test7szzk
    		I0523 09:29:48.589435    1195 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:48.593078    1195 store.go:370] GuaranteedUpdate of /kubernetes.io/pods/integration/test7szzk failed because of a conflict, going to retry
    		I0523 09:29:48.593565    1195 wrap.go:42] PUT /api/v1/namespaces/integration/pods/test7szzk/status: (3.737345ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:45458]
    		I0523 09:29:48.593658    1195 disruption.go:340] updatePod called on pod "test7szzk"
    		I0523 09:29:48.593674    1195 disruption.go:403] No PodDisruptionBudgets found for pod test7szzk, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.593682    1195 disruption.go:343] No matching pdb for pod "test7szzk"
    		I0523 09:29:48.593701    1195 pvc_protection_controller.go:276] Got event on pod integration/test7szzk
    		I0523 09:29:48.593890    1195 factory.go:1147] About to try and schedule pod test7szzk
    		I0523 09:29:48.593909    1195 scheduler.go:439] Attempting to schedule pod: integration/test7szzk
    		I0523 09:29:48.593934    1195 scheduler.go:191] Failed to schedule pod: integration/test7szzk
    		I0523 09:29:48.593964    1195 factory.go:1262] Unable to schedule integration test7szzk: no nodes are registered to the cluster; waiting
    		I0523 09:29:48.594015    1195 factory.go:1375] Updating pod condition for integration/test7szzk to (PodScheduled==False)
    		W0523 09:29:48.594084    1195 factory.go:1304] Request for pod integration/test7szzk already in flight, abandoning
    		I0523 09:29:48.594999    1195 wrap.go:42] POST /api/v1/namespaces/integration/events: (5.037902ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		I0523 09:29:48.595980    1195 factory.go:1147] About to try and schedule pod test7szzk
    		I0523 09:29:48.596010    1195 scheduler.go:435] Skip schedule deleting pod: integration/test7szzk
    		I0523 09:29:48.596073    1195 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test7szzk, uid d65f0e00-5e6b-11e8-885b-0242ac110002, event type update
    		I0523 09:29:48.596158    1195 disruption.go:340] updatePod called on pod "test7szzk"
    		I0523 09:29:48.596175    1195 disruption.go:403] No PodDisruptionBudgets found for pod test7szzk, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.596184    1195 disruption.go:343] No matching pdb for pod "test7szzk"
    		I0523 09:29:48.596202    1195 pvc_protection_controller.go:276] Got event on pod integration/test7szzk
    		I0523 09:29:48.596268    1195 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test7szzk, uid d65f0e00-5e6b-11e8-885b-0242ac110002, event type update
    		I0523 09:29:48.597700    1195 wrap.go:42] DELETE /api/v1/namespaces/integration/pods/test7szzk?timeout=5m0s: (7.626176ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.598025    1195 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test7szzk, uid d65f0e00-5e6b-11e8-885b-0242ac110002, event type delete
    		I0523 09:29:48.598078    1195 deployment_controller.go:357] Pod test7szzk deleted.
    		I0523 09:29:48.598091    1195 pvc_protection_controller.go:276] Got event on pod integration/test7szzk
    		I0523 09:29:48.598118    1195 taint_manager.go:338] Noticed pod deletion: types.NamespacedName{Namespace:"integration", Name:"test7szzk"}
    		I0523 09:29:48.598139    1195 resource_quota_monitor.go:352] QuotaMonitor process object: /v1, Resource=pods, namespace integration, name test7szzk, uid d65f0e00-5e6b-11e8-885b-0242ac110002, event type delete
    		I0523 09:29:48.598168    1195 disruption.go:369] deletePod called on pod "test7szzk"
    		I0523 09:29:48.598179    1195 disruption.go:403] No PodDisruptionBudgets found for pod test7szzk, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.598186    1195 disruption.go:372] No matching pdb for pod "test7szzk"
    		I0523 09:29:48.599008    1195 admission.go:97] getting security context constraints for pod  (generate: test) in namespace integration with user info &{system:admin  [system:masters system:cluster-admins system:authenticated] map[]}
    		I0523 09:29:48.599061    1195 admission.go:108] getting security context constraints for pod  (generate: test) with service account info &{system:serviceaccount:integration:default  [system:serviceaccounts system:serviceaccounts:integration] map[]}
    		I0523 09:29:48.602693    1195 wrap.go:42] GET /api/v1/namespaces/integration: (2.70332ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45458]
    		I0523 09:29:48.602920    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.602955    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.602966    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.602974    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.602995    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.603012    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.603039    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.603054    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.603062    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.603081    1195 matcher.go:342] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.603125    1195 matcher.go:279] got preallocated values for min: 1000060000, max: 1000069999 for uid range in namespace integration
    		I0523 09:29:48.603137    1195 matcher.go:292] got preallocated value for level: s0:c8,c2 for selinux options in namespace integration
    		I0523 09:29:48.603145    1195 matcher.go:322] got preallocated value for groups: 1000060000/10000 in namespace integration
    		I0523 09:29:48.603169    1195 admission.go:217] validating pod  (generate: test) against providers anyuid,restricted,nonroot,hostmount-anyuid,hostnetwork,hostaccess,privileged
    		I0523 09:29:48.603229    1195 admission.go:170] pod  (generate: test) validated against provider anyuid
    		I0523 09:29:48.604441    1195 wrap.go:42] PATCH /api/v1/namespaces/integration/events/test7szzk.15313c101ecdfcc9: (7.896583ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		I0523 09:29:48.605262    1195 wrap.go:42] POST /api/v1/namespaces/integration/pods?timeout=5m0s: (6.895919ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8] 127.0.0.1:45730]
    		I0523 09:29:48.605610    1195 factory.go:1147] About to try and schedule pod testdzf9z
    		I0523 09:29:48.605625    1195 scheduler.go:439] Attempting to schedule pod: integration/testdzf9z
    		I0523 09:29:48.605645    1195 scheduler.go:191] Failed to schedule pod: integration/testdzf9z
    		I0523 09:29:48.605667    1195 factory.go:1262] Unable to schedule integration testdzf9z: no nodes are registered to the cluster; waiting
    		I0523 09:29:48.605705    1195 factory.go:1375] Updating pod condition for integration/testdzf9z to (PodScheduled==False)
    		I0523 09:29:48.606071    1195 backoff_utils.go:79] Backing off 1s
    		I0523 09:29:48.606256    1195 taint_manager.go:345] Noticed pod update: types.NamespacedName{Namespace:"integration", Name:"testdzf9z"}
    		I0523 09:29:48.606310    1195 disruption.go:328] addPod called on pod "testdzf9z"
    		I0523 09:29:48.606326    1195 disruption.go:403] No PodDisruptionBudgets found for pod testdzf9z, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.606334    1195 disruption.go:331] No matching pdb for pod "testdzf9z"
    		I0523 09:29:48.606356    1195 pvc_protection_controller.go:276] Got event on pod integration/testdzf9z
    		INFO: 2018/05/23 09:29:48 ccBalancerWrapper: updating state and picker called by balancer: IDLE, 0xc4347eefc0
    		INFO: 2018/05/23 09:29:48 dialing to target with scheme: ""
    		INFO: 2018/05/23 09:29:48 could not get resolver for scheme: ""
    		INFO: 2018/05/23 09:29:48 balancerWrapper: is pickfirst: false
    		INFO: 2018/05/23 09:29:48 balancerWrapper: got update addr from Notify: [{127.0.0.1:17234 <nil>}]
    		INFO: 2018/05/23 09:29:48 ccBalancerWrapper: new subconn: [{127.0.0.1:17234 0  <nil>}]
    		INFO: 2018/05/23 09:29:48 balancerWrapper: handle subconn state change: 0xc42f6aed50, CONNECTING
    		INFO: 2018/05/23 09:29:48 ccBalancerWrapper: updating state and picker called by balancer: CONNECTING, 0xc4347eefc0
    		I0523 09:29:48.609127    1195 wrap.go:42] PUT /api/v1/namespaces/integration/pods/testdzf9z/status: (2.709952ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/scheduler] 127.0.0.1:45458]
    		I0523 09:29:48.610164    1195 disruption.go:340] updatePod called on pod "testdzf9z"
    		I0523 09:29:48.610189    1195 disruption.go:403] No PodDisruptionBudgets found for pod testdzf9z, PodDisruptionBudget controller will avoid syncing.
    		I0523 09:29:48.610198    1195 disruption.go:343] No matching pdb for pod "testdzf9z"
    		I0523 09:29:48.610216    1195 pvc_protection_controller.go:276] Got event on pod integration/testdzf9z
    		I0523 09:29:48.610325    1195 factory.go:1147] About to try and schedule pod testdzf9z
    		I0523 09:29:48.610346    1195 scheduler.go:439] Attempting to schedule pod: integration/testdzf9z
    		I0523 09:29:48.610362    1195 scheduler.go:191] Failed to schedule pod: integration/testdzf9z
    		I0523 09:29:48.610396    1195 factory.go:1262] Unable to schedule integration testdzf9z: no nodes are registered to the cluster; waiting
    		I0523 09:29:48.610438    1195 factory.go:1375] Updating pod condition for integration/testdzf9z to (PodScheduled==False)
    		W0523 09:29:48.610488    1195 factory.go:1304] Request for pod integration/testdzf9z already in flight, abandoning
    		I0523 09:29:48.611356    1195 wrap.go:42] POST /api/v1/namespaces/integration/events: (6.326657ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		I0523 09:29:48.615151    1195 wrap.go:42] POST /api/v1/namespaces/integration/events: (3.009969ms) 201 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		I0523 09:29:48.619462    1195 wrap.go:42] PATCH /api/v1/namespaces/integration/events/testdzf9z.15313c101fc8a9a4: (3.649426ms) 200 [[integration.test/v1.10.0+b81c8f8 (linux/amd64) kubernetes/b81c8f8/leader-election] 127.0.0.1:45458]
    		INFO: 2018/05/23 09:29:48 balancerWrapper: handle subconn state change: 0xc42f6aed50, READY
    		INFO: 2018/05/23 09:29:48 ccBalancerWrapper: updating state and picker called by balancer: READY, 0xc4347eefc0
    		INFO: 2018/05/23 09:29:48 balancerWrapper: got update addr from Notify: [{127.0.0.1:17234 <nil>}]
    		
FAIL
exit status 1
FAIL	github.com/openshift/origin/test/integration/runner	2012.640s
[INFO] [09:29:49+0000] jUnit XML report placed at _output/scripts/test-integration/artifacts/gotest_report_10A9m.xml
Of 203 tests executed in 2012.640s, 201 succeeded, 2 failed, and 0 were skipped.

In suite "github.com/openshift/origin/test/integration/runner", test case "TestIntegration" failed:
runner_test.go:84: using existing binary

In suite "github.com/openshift/origin/test/integration/runner", test case "TestIntegration/TestOAuthServiceAccountClientEvent" failed:
runner_test.go:129: FAILED TestOAuthServiceAccountClientEvent, retrying:

oauth_serviceaccount_client_events_test.go:141: test-bad-url: annotationPrefix serviceaccounts.openshift.io/oauth-redirecturi.one, annotation foo:foo
oauth_serviceaccount_client_events_test.go:371: GET https://127.0.0.1:12713/oauth/authorize?client_id=system%!A(MISSING)serviceaccount%!A(MISSING)test-project%!A(MISSING)default&redirect_uri=foo%!A(MISSING)foo&response_type=code&scope=user%!A(MISSING)info+role%!A(MISSING)edit%!A(MISSING)test-project
oauth_serviceaccount_client_events_test.go:396: Bad Request: HTTP/2.0 400 Bad Request
Content-Length: 155
Cache-Control: no-cache, no-store, max-age=0, must-revalidate
Content-Type: application/json
Date: Wed, 23 May 2018 09:15:17 GMT
Expires: Fri, 01 Jan 1990 00:00:00 GMT
Pragma: no-cache

{"error":"server_error","error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."}
oauth_serviceaccount_client_events_test.go:141: test-bad-url-parse: annotationPrefix serviceaccounts.openshift.io/oauth-redirecturi.one, annotation ::
oauth_serviceaccount_client_events_test.go:371: GET https://127.0.0.1:12713/oauth/authorize?client_id=system%!A(MISSING)serviceaccount%!A(MISSING)test-project%!A(MISSING)default&redirect_uri=%!A(MISSING)%!A(MISSING)&response_type=code&scope=user%!A(MISSING)info+role%!A(MISSING)edit%!A(MISSING)test-project
oauth_serviceaccount_client_events_test.go:396: Bad Request: HTTP/2.0 400 Bad Request
Content-Length: 155
Cache-Control: no-cache, no-store, max-age=0, must-revalidate
Content-Type: application/json
Date: Wed, 23 May 2018 09:15:17 GMT
Expires: Fri, 01 Jan 1990 00:00:00 GMT
Pragma: no-cache

{"error":"server_error","error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."}
oauth_serviceaccount_client_events_test.go:141: test-bad-redirect-type-parse: annotationPrefix serviceaccounts.openshift.io/oauth-redirectreference.1, annotation {asdf":"adsf"}
oauth_serviceaccount_client_events_test.go:371: GET https://127.0.0.1:12713/oauth/authorize?client_id=system%!A(MISSING)serviceaccount%!A(MISSING)test-project%!A(MISSING)default&redirect_uri=%!B(MISSING)asdf%3A%!a(MISSING)dsf%7D&response_type=code&scope=user%!A(MISSING)info+role%!A(MISSING)edit%!A(MISSING)test-project
oauth_serviceaccount_client_events_test.go:396: Bad Request: HTTP/2.0 400 Bad Request
Content-Length: 155
Cache-Control: no-cache, no-store, max-age=0, must-revalidate
Content-Type: application/json
Date: Wed, 23 May 2018 09:15:17 GMT
Expires: Fri, 01 Jan 1990 00:00:00 GMT
Pragma: no-cache

{"error":"server_error","error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."}
oauth_serviceaccount_client_events_test.go:141: test-bad-redirect-reference-kind: annotationPrefix serviceaccounts.openshift.io/oauth-redirectreference.1, annotation {"kind":"OAuthRedirectReference","apiVersion":"oauth.openshift.io/v1","metadata":{"creationTimestamp":null},"reference":{"group":"route.openshift.io","kind":"foo","name":"route1"}}
oauth_serviceaccount_client_events_test.go:371: GET https://127.0.0.1:12713/oauth/authorize?client_id=system%!A(MISSING)serviceaccount%!A(MISSING)test-project%!A(MISSING)default&redirect_uri=%!B(MISSING)%!k(MISSING)ind%3A%!O(MISSING)AuthRedirectReference%2C%!a(MISSING)piVersion%3A%!o(MISSING)auth.openshift.io%!F(MISSING)v1%2C%!m(MISSING)etadata%3A%!B(MISSING)%!c(MISSING)reationTimestamp%3Anull%!D(MISSING)%!C(MISSING)%!r(MISSING)eference%3A%!B(MISSING)%!g(MISSING)roup%3A%!r(MISSING)oute.openshift.io%2C%!k(MISSING)ind%3A%!f(MISSING)oo%2C%!n(MISSING)ame%3A%!r(MISSING)oute1%7D%!D(MISSING)%!A(MISSING)&response_type=code&scope=user%!A(MISSING)info+role%!A(MISSING)edit%!A(MISSING)test-project
oauth_serviceaccount_client_events_test.go:396: Bad Request: HTTP/2.0 400 Bad Request
Content-Length: 155
Cache-Control: no-cache, no-store, max-age=0, must-revalidate
Content-Type: application/json
Date: Wed, 23 May 2018 09:15:17 GMT
Expires: Fri, 01 Jan 1990 00:00:00 GMT
Pragma: no-cache

{"error":"server_error","error_description":"The authorization server encountered an unexpected condition that prevented it from fulfilling the request."}
oauth_serviceaccount_client_events_test.go:163: test-bad-redirect-reference-kind: expected 1 events, found 0
[ERROR] [09:29:49+0000] hack/test-go.sh exited with code 1 after 00h 33m 34s
[ERROR] [09:29:49+0000] PID 1253: hack/test-integration.sh:18: `COVERAGE_SPEC=" " DETECT_RACES=false TMPDIR="${BASETMPDIR}" TIMEOUT=45m GOTEST_FLAGS="${gotest_flags}" "${OS_ROOT}/hack/test-go.sh" "test/integration/runner"` exited with status 1.
[INFO] [09:29:49+0000] 		Stack Trace: 
[INFO] [09:29:49+0000] 		  1: hack/test-integration.sh:18: `COVERAGE_SPEC=" " DETECT_RACES=false TMPDIR="${BASETMPDIR}" TIMEOUT=45m GOTEST_FLAGS="${gotest_flags}" "${OS_ROOT}/hack/test-go.sh" "test/integration/runner"`
[INFO] [09:29:49+0000]   Exiting with code 1.
make: *** [test-integration] Error 1
[WARNING] [09:29:50+0000] Copying _output/local/releases from the container failed!
[WARNING] [09:29:50+0000] Error response from daemon: lstat /var/lib/docker/overlay2/3fa7cca1c6eada167dbb68a10a47127ca8662452bb276757607303366333c0d2/merged/go/src/github.com/openshift/origin/_output/local/releases: no such file or directory
[ERROR] [09:29:50+0000] PID 10842: hack/lib/build/environment.sh:172: `return "${exitcode}"` exited with status 2.
[INFO] [09:29:50+0000] 		Stack Trace: 
[INFO] [09:29:50+0000] 		  1: hack/lib/build/environment.sh:172: `return "${exitcode}"`
[INFO] [09:29:50+0000] 		  2: hack/lib/build/environment.sh:281: os::build::environment::withsource
[INFO] [09:29:50+0000] 		  3: hack/env:42: os::build::environment::run
[INFO] [09:29:50+0000]   Exiting with code 2.
++ export status=FAILURE
++ status=FAILURE
+ set +o xtrace
########## FINISHED STAGE: FAILURE: RUN INTEGRATION TESTS [00h 46m 19s] ##########
Build step 'Execute shell' marked build as failure
[PostBuildScript] - Executing post build scripts.
[workspace@2] $ /bin/bash /tmp/jenkins6720330346070910389.sh
########## STARTING STAGE: DOWNLOAD ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
+ mkdir -p /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo stat /data/src/github.com/openshift/origin/_output/scripts
  File: ‘/data/src/github.com/openshift/origin/_output/scripts’
  Size: 61        	Blocks: 0          IO Block: 4096   directory
Device: ca02h/51714d	Inode: 101204125   Links: 5
Access: (2755/drwxr-sr-x)  Uid: ( 1001/  origin)   Gid: ( 1003/origin-git)
Context: unconfined_u:object_r:container_file_t:s0
Access: 1970-01-01 00:00:00.000000000 +0000
Modify: 2018-05-23 08:44:59.000000000 +0000
Change: 2018-05-23 09:29:50.656792242 +0000
 Birth: -
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo chmod -R o+rX /data/src/github.com/openshift/origin/_output/scripts
+ scp -r -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel:/data/src/github.com/openshift/origin/_output/scripts /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/gathered
└── scripts
    ├── shell
    │   ├── artifacts
    │   ├── logs
    │   │   ├── 40945a5e12ddcb239ca4a38959eb7c5dc3c2b125deabdc62202ae3df40882a65.json
    │   │   └── scripts.log
    │   └── openshift.local.home
    ├── test-integration
    │   ├── artifacts
    │   │   ├── gotest_report_10A9m
    │   │   └── gotest_report_10A9m.xml
    │   ├── logs
    │   │   ├── raw_test_output.log
    │   │   ├── scripts.log
    │   │   └── test-go-err.log
    │   └── openshift.local.home
    └── test-tools
        ├── artifacts
        ├── logs
        │   ├── raw_test_output.log
        │   └── scripts.log
        └── openshift.local.home

13 directories, 9 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins8859407383296892786.sh
########## STARTING STAGE: GENERATE ARTIFACTS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/generated
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/generated
+ mkdir /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/generated
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo docker version && sudo docker info && sudo docker images && sudo docker ps -a 2>&1'
  WARNING: You're not using the default seccomp profile
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo cat /etc/sysconfig/docker /etc/sysconfig/docker-network /etc/sysconfig/docker-storage /etc/sysconfig/docker-storage-setup /etc/systemd/system/docker.service 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo find /var/lib/docker/containers -name *.log | sudo xargs tail -vn +1 2>&1'
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'oc get --raw /metrics --server=https://$( uname --nodename ):10250 --config=/etc/origin/master/admin.kubeconfig 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo ausearch -m AVC -m SELINUX_ERR -m USER_AVC 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'oc get --raw /metrics --config=/etc/origin/master/admin.kubeconfig 2>&1'
+ true
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo df -T -h && sudo pvs && sudo vgs && sudo lvs && sudo findmnt --all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo yum list installed 2>&1'
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl --dmesg --no-pager --all --lines=all 2>&1'
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel 'sudo journalctl _PID=1 --no-pager --all --lines=all 2>&1'
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/generated
/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/generated
├── avc_denials.log
├── containers.log
├── dmesg.log
├── docker.config
├── docker.info
├── filesystem.info
├── installed_packages.log
├── master-metrics.log
├── node-metrics.log
└── pid1.journal

0 directories, 10 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins1831403223914387150.sh
########## STARTING STAGE: FETCH SYSTEMD JOURNALS FROM THE REMOTE HOST ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
+ trap 'exit 0' EXIT
++ pwd
+ ARTIFACT_DIR=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/journals
+ rm -rf /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/journals
+ mkdir /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/journals
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit docker.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit dnsmasq.service --no-pager --all --lines=all
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config openshiftdevel sudo journalctl --unit systemd-journald.service --no-pager --all --lines=all
+ tree /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/journals
/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/artifacts/journals
├── dnsmasq.service
├── docker.service
└── systemd-journald.service

0 directories, 3 files
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins8721291834852587223.sh
########## STARTING STAGE: ASSEMBLE GCS OUTPUT ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
+ trap 'exit 0' EXIT
+ mkdir -p gcs/artifacts gcs/artifacts/generated gcs/artifacts/journals gcs/artifacts/gathered
++ python -c 'import json; import urllib; print json.load(urllib.urlopen('\''https://ci.openshift.redhat.com/jenkins/job/test_pull_request_origin_integration/18066/api/json'\''))['\''result'\'']'
+ result=FAILURE
+ cat
++ date +%s
+ cat /var/lib/jenkins/jobs/test_pull_request_origin_integration/builds/18066/log
+ cp artifacts/generated/avc_denials.log artifacts/generated/containers.log artifacts/generated/dmesg.log artifacts/generated/docker.config artifacts/generated/docker.info artifacts/generated/filesystem.info artifacts/generated/installed_packages.log artifacts/generated/master-metrics.log artifacts/generated/node-metrics.log artifacts/generated/pid1.journal gcs/artifacts/generated/
+ cp artifacts/journals/dnsmasq.service artifacts/journals/docker.service artifacts/journals/systemd-journald.service gcs/artifacts/journals/
+ cp -r artifacts/gathered/scripts gcs/artifacts/
++ pwd
+ scp -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -r /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/gcs openshiftdevel:/data
+ scp -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /var/lib/jenkins/.config/gcloud/gcs-publisher-credentials.json openshiftdevel:/data/credentials.json
+ exit 0
[workspace@2] $ /bin/bash /tmp/jenkins4660163687938872198.sh
########## STARTING STAGE: PUSH THE ARTIFACTS AND METADATA ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ mktemp
+ script=/tmp/tmp.MF2eJN6K4h
+ cat
+ chmod +x /tmp/tmp.MF2eJN6K4h
+ scp -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config /tmp/tmp.MF2eJN6K4h openshiftdevel:/tmp/tmp.MF2eJN6K4h
+ ssh -F /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/.ssh_config -t openshiftdevel 'bash -l -c "timeout 300 /tmp/tmp.MF2eJN6K4h"'
+ cd /home/origin
+ trap 'exit 0' EXIT
+ [[ -n {"type":"presubmit","job":"test_pull_request_origin_integration","buildid":"97787ec0-5e64-11e8-b81f-0a58ac100343","refs":{"org":"openshift","repo":"origin","base_ref":"master","base_sha":"da9b09e2b035a62b0cfe2359da09539587fe7961","pulls":[{"number":19759,"author":"wozniakjan","sha":"4ee7fb9b57dd7fc5fa0527d4f9a0b57d09a24757"}]}} ]]
++ jq --compact-output .buildid
+ [[ "97787ec0-5e64-11e8-b81f-0a58ac100343" =~ ^"[0-9]+"$ ]]
+ echo 'Using BUILD_NUMBER'
Using BUILD_NUMBER
++ jq --compact-output '.buildid |= "18066"'
+ JOB_SPEC='{"type":"presubmit","job":"test_pull_request_origin_integration","buildid":"18066","refs":{"org":"openshift","repo":"origin","base_ref":"master","base_sha":"da9b09e2b035a62b0cfe2359da09539587fe7961","pulls":[{"number":19759,"author":"wozniakjan","sha":"4ee7fb9b57dd7fc5fa0527d4f9a0b57d09a24757"}]}}'
+ docker run -e 'JOB_SPEC={"type":"presubmit","job":"test_pull_request_origin_integration","buildid":"18066","refs":{"org":"openshift","repo":"origin","base_ref":"master","base_sha":"da9b09e2b035a62b0cfe2359da09539587fe7961","pulls":[{"number":19759,"author":"wozniakjan","sha":"4ee7fb9b57dd7fc5fa0527d4f9a0b57d09a24757"}]}}' -v /data:/data:z registry.svc.ci.openshift.org/ci/gcsupload:latest --dry-run=false --gcs-path=gs://origin-ci-test --gcs-credentials-file=/data/credentials.json --path-strategy=single --default-org=openshift --default-repo=origin /data/gcs/artifacts /data/gcs/build-log.txt /data/gcs/finished.json
Unable to find image 'registry.svc.ci.openshift.org/ci/gcsupload:latest' locally
Trying to pull repository registry.svc.ci.openshift.org/ci/gcsupload ... 
latest: Pulling from registry.svc.ci.openshift.org/ci/gcsupload
6d987f6f4279: Already exists
4cccebe844ee: Already exists
cafcba51f636: Pulling fs layer
cafcba51f636: Verifying Checksum
cafcba51f636: Download complete
cafcba51f636: Pull complete
Digest: sha256:c452798b56e3f4649c557c3ff7273126042065e3be152689a1ffb880638e617d
Status: Downloaded newer image for registry.svc.ci.openshift.org/ci/gcsupload:latest
{"component":"gcsupload","level":"info","msg":"Gathering artifacts from artifact directory: /data/gcs/artifacts","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/avc_denials.log in artifact directory. Uploading as artifacts/generated/avc_denials.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/containers.log in artifact directory. Uploading as artifacts/generated/containers.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/dmesg.log in artifact directory. Uploading as artifacts/generated/dmesg.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/docker.config in artifact directory. Uploading as artifacts/generated/docker.config\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/docker.info in artifact directory. Uploading as artifacts/generated/docker.info\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/filesystem.info in artifact directory. Uploading as artifacts/generated/filesystem.info\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/installed_packages.log in artifact directory. Uploading as artifacts/generated/installed_packages.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/master-metrics.log in artifact directory. Uploading as artifacts/generated/master-metrics.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/node-metrics.log in artifact directory. Uploading as artifacts/generated/node-metrics.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/generated/pid1.journal in artifact directory. Uploading as artifacts/generated/pid1.journal\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/journals/dnsmasq.service in artifact directory. Uploading as artifacts/journals/dnsmasq.service\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/journals/docker.service in artifact directory. Uploading as artifacts/journals/docker.service\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/journals/systemd-journald.service in artifact directory. Uploading as artifacts/journals/systemd-journald.service\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/shell/logs/40945a5e12ddcb239ca4a38959eb7c5dc3c2b125deabdc62202ae3df40882a65.json in artifact directory. Uploading as artifacts/scripts/shell/logs/40945a5e12ddcb239ca4a38959eb7c5dc3c2b125deabdc62202ae3df40882a65.json\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/shell/logs/scripts.log in artifact directory. Uploading as artifacts/scripts/shell/logs/scripts.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m in artifact directory. Uploading as artifacts/scripts/test-integration/artifacts/gotest_report_10A9m\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m.xml in artifact directory. Uploading as artifacts/scripts/test-integration/artifacts/gotest_report_10A9m.xml\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-integration/logs/raw_test_output.log in artifact directory. Uploading as artifacts/scripts/test-integration/logs/raw_test_output.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-integration/logs/scripts.log in artifact directory. Uploading as artifacts/scripts/test-integration/logs/scripts.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-integration/logs/test-go-err.log in artifact directory. Uploading as artifacts/scripts/test-integration/logs/test-go-err.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-tools/logs/raw_test_output.log in artifact directory. Uploading as artifacts/scripts/test-tools/logs/raw_test_output.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","level":"info","msg":"Found /data/gcs/artifacts/scripts/test-tools/logs/scripts.log in artifact directory. Uploading as artifacts/scripts/test-tools/logs/scripts.log\n","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/build-log.txt","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/filesystem.info","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/dnsmasq.service","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/docker.service","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/shell/logs/scripts.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-tools/logs/scripts.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/directory/test_pull_request_origin_integration/18066.txt","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/latest-build.txt","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/scripts.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-tools/logs/raw_test_output.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m.xml","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/raw_test_output.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/installed_packages.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/containers.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/avc_denials.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/shell/logs/40945a5e12ddcb239ca4a38959eb7c5dc3c2b125deabdc62202ae3df40882a65.json","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/finished.json","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/directory/test_pull_request_origin_integration/latest-build.txt","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/node-metrics.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/pid1.journal","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/dmesg.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/docker.config","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/systemd-journald.service","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/test-go-err.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/docker.info","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/master-metrics.log","level":"info","msg":"Queued for upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/master-metrics.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:07Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/systemd-journald.service","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/containers.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/dmesg.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/directory/test_pull_request_origin_integration/18066.txt","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/latest-build.txt","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-tools/logs/raw_test_output.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/directory/test_pull_request_origin_integration/latest-build.txt","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/dnsmasq.service","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/shell/logs/40945a5e12ddcb239ca4a38959eb7c5dc3c2b125deabdc62202ae3df40882a65.json","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/finished.json","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/node-metrics.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/docker.info","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/docker.config","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/avc_denials.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/installed_packages.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/test-go-err.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/pid1.journal","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/generated/filesystem.info","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/shell/logs/scripts.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-tools/logs/scripts.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/artifacts/gotest_report_10A9m.xml","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/journals/docker.service","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/scripts.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/artifacts/scripts/test-integration/logs/raw_test_output.log","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","dest":"pr-logs/pull/19759/test_pull_request_origin_integration/18066/build-log.txt","level":"info","msg":"Finished upload","time":"2018-05-23T09:30:08Z"}
{"component":"gcsupload","level":"info","msg":"Finished upload to GCS","time":"2018-05-23T09:30:08Z"}
+ exit 0
+ set +o xtrace
########## FINISHED STAGE: SUCCESS: PUSH THE ARTIFACTS AND METADATA [00h 00m 06s] ##########
[workspace@2] $ /bin/bash /tmp/jenkins2036259316544843516.sh
########## STARTING STAGE: DEPROVISION CLOUD RESOURCES ##########
+ [[ -s /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate ]]
+ source /var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/activate
++ export VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ VIRTUAL_ENV=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83
++ export PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ PATH=/var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/bin:/sbin:/usr/sbin:/bin:/usr/bin
++ unset PYTHON_HOME
++ export OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
++ OCT_CONFIG_HOME=/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config
+ oct deprovision

PLAYBOOK: main.yml *************************************************************
4 plays in /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml

PLAY [ensure we have the parameters necessary to deprovision virtual hosts] ****

TASK [ensure all required variables are set] ***********************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:9
skipping: [localhost] => (item=origin_ci_inventory_dir)  => {
    "changed": false, 
    "generated_timestamp": "2018-05-23 05:30:09.807026", 
    "item": "origin_ci_inventory_dir", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}
skipping: [localhost] => (item=origin_ci_aws_region)  => {
    "changed": false, 
    "generated_timestamp": "2018-05-23 05:30:09.809775", 
    "item": "origin_ci_aws_region", 
    "skip_reason": "Conditional check failed", 
    "skipped": true
}

PLAY [deprovision virtual hosts in EC2] ****************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deprovision a virtual EC2 host] ******************************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:28
included: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml for localhost

TASK [update the SSH configuration to remove AWS EC2 specifics] ****************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:2
ok: [localhost] => {
    "changed": false, 
    "generated_timestamp": "2018-05-23 05:30:10.609318", 
    "msg": ""
}

TASK [rename EC2 instance for termination reaper] ******************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:8
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-05-23 05:30:11.184231", 
    "msg": "Tags {'Name': 'oct-terminate'} created for resource i-06b49eb3b668fcc01."
}

TASK [tear down the EC2 instance] **********************************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:15
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-05-23 05:30:11.999095", 
    "instance_ids": [
        "i-06b49eb3b668fcc01"
    ], 
    "instances": [
        {
            "ami_launch_index": "0", 
            "architecture": "x86_64", 
            "block_device_mapping": {
                "/dev/sda1": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0b0cd4ab5dd01950c"
                }, 
                "/dev/sdb": {
                    "delete_on_termination": true, 
                    "status": "attached", 
                    "volume_id": "vol-0665c18d421495eb3"
                }
            }, 
            "dns_name": "ec2-54-209-101-1.compute-1.amazonaws.com", 
            "ebs_optimized": false, 
            "groups": {
                "sg-7e73221a": "default"
            }, 
            "hypervisor": "xen", 
            "id": "i-06b49eb3b668fcc01", 
            "image_id": "ami-0f07d2d9a03af96ec", 
            "instance_type": "m4.xlarge", 
            "kernel": null, 
            "key_name": "libra", 
            "launch_time": "2018-05-23T08:38:21.000Z", 
            "placement": "us-east-1d", 
            "private_dns_name": "ip-172-18-7-164.ec2.internal", 
            "private_ip": "172.18.7.164", 
            "public_dns_name": "ec2-54-209-101-1.compute-1.amazonaws.com", 
            "public_ip": "54.209.101.1", 
            "ramdisk": null, 
            "region": "us-east-1", 
            "root_device_name": "/dev/sda1", 
            "root_device_type": "ebs", 
            "state": "running", 
            "state_code": 16, 
            "tags": {
                "Name": "oct-terminate", 
                "openshift_etcd": "", 
                "openshift_master": "", 
                "openshift_node": ""
            }, 
            "tenancy": "default", 
            "virtualization_type": "hvm"
        }
    ], 
    "tagged_instances": []
}

TASK [remove the serialized host variables] ************************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/roles/aws-down/tasks/main.yml:22
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-05-23 05:30:12.279162", 
    "path": "/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory/host_vars/172.18.7.164.yml", 
    "state": "absent"
}

PLAY [deprovision virtual hosts locally managed by Vagrant] *********************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

PLAY [clean up local configuration for deprovisioned instances] ****************

TASK [remove inventory configuration directory] ********************************
task path: /var/lib/jenkins/origin-ci-tool/7a5c5e83c372ad2e6b3b64b3efa16fe2cb37ef83/lib/python2.7/site-packages/oct/ansible/oct/playbooks/deprovision/main.yml:61
changed: [localhost] => {
    "changed": true, 
    "generated_timestamp": "2018-05-23 05:30:12.775337", 
    "path": "/var/lib/jenkins/jobs/test_pull_request_origin_integration/workspace@2/.config/origin-ci-tool/inventory", 
    "state": "absent"
}

PLAY RECAP *********************************************************************
localhost                  : ok=8    changed=4    unreachable=0    failed=0   

+ set +o xtrace
########## FINISHED STAGE: SUCCESS: DEPROVISION CLOUD RESOURCES [00h 00m 04s] ##########
Archiving artifacts
[BFA] Scanning build for known causes...
[BFA] Found failure cause(s):
[BFA] Unit or Integration Test Failed
[BFA] Job Stage Failed
[BFA] Done. 11s
[WS-CLEANUP] Deleting project workspace...[WS-CLEANUP] done
Finished: FAILURE